diff --git a/.github/allowed-actions.js b/.github/allowed-actions.js new file mode 100644 index 0000000000000..4fb894758060d --- /dev/null +++ b/.github/allowed-actions.js @@ -0,0 +1,7 @@ +// This is a whitelist of GitHub Actions that are approved for use in this project. +// If a new or existing workflow file is updated to use an action or action version +// not listed here, CI will fail. + +module.exports = [ + 'gaurav-nelson/github-action-markdown-link-check@7481451f70251762f149d69596e3e276ebf2b236', // gaurav-nelson/github-action-markdown-link-check@v1.0.8 +] diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000000..d782bb80f7539 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + labels: ["A2-insubstantial", "B0-silent", "C1-low"] + schedule: + interval: "daily" diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml new file mode 100644 index 0000000000000..ee03075176995 --- /dev/null +++ b/.github/workflows/check-labels.yml @@ -0,0 +1,16 @@ +name: Check labels + +on: + pull_request: + types: [labeled, opened, synchronize, unlabeled] + +jobs: + check-labels: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Check labels + run: bash ${{ github.workspace }}/.maintain/github/check_labels.sh + env: + GITHUB_PR: ${{ github.event.pull_request.number }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/md-link-check.yml b/.github/workflows/md-link-check.yml new file mode 100644 index 0000000000000..868569911d471 --- /dev/null +++ b/.github/workflows/md-link-check.yml @@ -0,0 +1,19 @@ +name: Check Links + +on: + pull_request: + branches: + - master + push: + branches: + - master + +jobs: + markdown-link-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: gaurav-nelson/github-action-markdown-link-check@7481451f70251762f149d69596e3e276ebf2b236 + with: + use-quiet-mode: 'yes' + config-file: '.github/workflows/mlc_config.json' diff --git a/.github/workflows/mlc_config.json b/.github/workflows/mlc_config.json new file mode 100644 index 0000000000000..e7e620b39e0a9 --- /dev/null +++ b/.github/workflows/mlc_config.json @@ -0,0 +1,7 @@ +{ + "ignorePatterns": [ + { + "pattern": "^https://crates.io", + } + ] +} diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cbb56fcf72679..4a410cf3e5dfd 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -24,13 +24,15 @@ stages: - check - test - build - - post-build-test - - chaos-env - - chaos - publish - deploy - flaming-fir +workflow: + rules: + - if: $CI_COMMIT_TAG + - if: $CI_COMMIT_BRANCH + variables: &default-vars GIT_STRATEGY: fetch GIT_DEPTH: 100 @@ -38,7 +40,7 @@ variables: &default-vars DOCKER_OS: "debian:stretch" ARCH: "x86_64" # FIXME set to release - CARGO_UNLEASH_INSTALL_PARAMS: "--version 1.0.0-alpha.10" + CARGO_UNLEASH_INSTALL_PARAMS: "--version 1.0.0-alpha.11" CARGO_UNLEASH_PKG_DEF: "--skip node node-* pallet-template pallet-example pallet-example-* subkey chain-spec-builder" default: @@ -55,8 +57,6 @@ default: .kubernetes-build: &kubernetes-build tags: - kubernetes-parity-build - environment: - name: parity-build interruptible: true .docker-env: &docker-env @@ -75,11 +75,6 @@ default: tags: - linux-docker -workflow: - rules: - - if: $CI_COMMIT_TAG - - if: $CI_COMMIT_BRANCH - .test-refs: &test-refs rules: - if: $CI_PIPELINE_SOURCE == "web" @@ -132,7 +127,7 @@ check-signed-tag: <<: *kubernetes-build rules: - if: 
$CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+\.[0-9]+.*$/ + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 script: - ./.maintain/gitlab/check_signed.sh @@ -153,6 +148,19 @@ test-dependency-rules: script: - .maintain/ensure-deps.sh +test-prometheus-alerting-rules: + stage: check + image: paritytech/tools:latest + <<: *kubernetes-build + rules: + - if: $CI_COMMIT_BRANCH + changes: + - .gitlab-ci.yml + - .maintain/monitoring/**/* + script: + - promtool check rules .maintain/monitoring/alerting-rules/alerting-rules.yaml + - cat .maintain/monitoring/alerting-rules/alerting-rules.yaml | promtool test rules .maintain/monitoring/alerting-rules/alerting-rule-tests.yaml + #### stage: test cargo-audit: @@ -197,7 +205,7 @@ cargo-check-benches: <<: *docker-env <<: *test-refs script: - - BUILD_DUMMY_WASM_BINARY=1 time cargo +nightly check --benches --all + - SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all - cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small - cargo run --release -p node-bench -- ::trie::read::small - sccache -s @@ -208,7 +216,7 @@ cargo-check-subkey: <<: *test-refs script: - cd ./bin/utils/subkey - - BUILD_DUMMY_WASM_BINARY=1 time cargo check --release + - SKIP_WASM_BUILD=1 time cargo check --release - sccache -s test-deterministic-wasm: @@ -222,7 +230,7 @@ test-deterministic-wasm: # build runtime - cargo build --verbose --release -p node-runtime # make checksum - - sha256sum target/release/wbuild/target/wasm32-unknown-unknown/release/node_runtime.wasm > checksum.sha256 + - sha256sum target/release/wbuild/node-runtime/target/wasm32-unknown-unknown/release/node_runtime.wasm > checksum.sha256 # clean up – FIXME: can we reuse some of the artifacts? - cargo clean # build again @@ -306,13 +314,8 @@ check-web-wasm: script: # WASM support is in progress. As more and more crates support WASM, we # should add entries here. See https://github.com/paritytech/substrate/issues/2416 - - time cargo build --target=wasm32-unknown-unknown -p sp-io - - time cargo build --target=wasm32-unknown-unknown -p sp-runtime - - time cargo build --target=wasm32-unknown-unknown -p sp-std - - time cargo build --target=wasm32-unknown-unknown -p sc-consensus-aura - - time cargo build --target=wasm32-unknown-unknown -p sc-consensus-babe - - time cargo build --target=wasm32-unknown-unknown -p sp-consensus - - time cargo build --target=wasm32-unknown-unknown -p sc-telemetry + # Note: we don't need to test crates imported in `bin/node/cli` + - time cargo build --manifest-path=client/consensus/aura/Cargo.toml --target=wasm32-unknown-unknown --features getrandom # Note: the command below is a bit weird because several Cargo issues prevent us from compiling the node in a more straight-forward way. 
- time cargo +nightly build --manifest-path=bin/node/cli/Cargo.toml --no-default-features --features browser --target=wasm32-unknown-unknown -Z features=itarget # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases @@ -343,19 +346,11 @@ cargo-check-macos: <<: *docker-env <<: *test-refs script: - - BUILD_DUMMY_WASM_BINARY=1 time cargo check --release + - SKIP_WASM_BUILD=1 time cargo check --release - sccache -s tags: - osx -test-prometheus-alerting-rules: - stage: test - image: paritytech/tools:latest - <<: *kubernetes-build - script: - - promtool check rules .maintain/monitoring/alerting-rules/alerting-rules.yaml - - cat .maintain/monitoring/alerting-rules/alerting-rules.yaml | promtool test rules .maintain/monitoring/alerting-rules/alerting-rule-tests.yaml - #### stage: build check-polkadot-companion-status: @@ -400,12 +395,11 @@ build-linux-substrate: &build-binary <<: *collect-artifacts <<: *docker-env rules: - # .build-refs with manual on PRs and chaos + # .build-refs with manual on PRs - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME == "tags" - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - - if: $CI_COMMIT_MESSAGE =~ /\[chaos:(basic|medium|large)\]/ && $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # i.e add [chaos:basic] in commit message to trigger - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs when: manual allow_failure: true @@ -451,7 +445,7 @@ build-linux-subkey: &build-subkey - mkdir -p ./artifacts/subkey script: - cd ./bin/utils/subkey - - BUILD_DUMMY_WASM_BINARY=1 time cargo build --release --verbose + - SKIP_WASM_BUILD=1 time cargo build --release --verbose - cd - - mv ./target/release/subkey ./artifacts/subkey/. - echo -n "Subkey version = " @@ -471,7 +465,9 @@ build-rust-doc: stage: build <<: *docker-env <<: *test-refs - allow_failure: true + needs: + - job: test-linux-stable + artifacts: false variables: <<: *default-vars RUSTFLAGS: -Dwarnings @@ -483,142 +479,44 @@ build-rust-doc: - ./crate-docs/ script: - rm -f ./crate-docs/index.html # use it as an indicator if the job succeeds - - BUILD_DUMMY_WASM_BINARY=1 RUSTDOCFLAGS="--html-in-header $(pwd)/.maintain/rustdoc-header.html" + - SKIP_WASM_BUILD=1 RUSTDOCFLAGS="--html-in-header $(pwd)/.maintain/rustdoc-header.html" time cargo +nightly doc --no-deps --workspace --all-features --verbose - mv ./target/doc ./crate-docs - echo "" > ./crate-docs/index.html - sccache -s -#### stage: post-build-test - -trigger-contracts-ci: - stage: post-build-test - needs: - - job: build-linux-substrate - artifacts: false - - job: test-linux-stable - artifacts: false - trigger: - project: parity/srml-contracts-waterfall - branch: master - strategy: depend - rules: - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - -#### stage: chaos-env - -build-chaos-docker: - stage: chaos-env - rules: - # .build-refs with chaos - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 - - if: $CI_COMMIT_MESSAGE =~ /\[chaos:(basic|medium|large)\]/ && $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # i.e add [chaos:basic] in commit message to trigger - needs: - - job: build-linux-substrate - image: docker:stable - tags: - - kubernetes-parity-build - variables: - <<: *default-vars - DOCKER_HOST: tcp://localhost:2375 - DOCKER_DRIVER: overlay2 - PRODUCT: substrate - DOCKERFILE: $PRODUCT.Dockerfile - CONTAINER_IMAGE: paritypr/$PRODUCT - environment: - name: parity-simnet - services: - - docker:dind - before_script: - - test "$DOCKER_CHAOS_USER" -a "$DOCKER_CHAOS_TOKEN" - || ( echo "no docker credentials provided"; exit 1 ) - - docker login -u "$DOCKER_CHAOS_USER" -p "$DOCKER_CHAOS_TOKEN" - - docker info - script: - - cd ./artifacts/$PRODUCT/ - - VERSION="ci-${CI_COMMIT_SHORT_SHA}" - - echo "${PRODUCT} version = ${VERSION}" - - test -z "${VERSION}" && exit 1 - - docker build - --build-arg VCS_REF="${CI_COMMIT_SHA}" - --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" - --tag $CONTAINER_IMAGE:$VERSION - --file $DOCKERFILE . - - docker push $CONTAINER_IMAGE:$VERSION - after_script: - - docker logout - -#### stage: chaos - -chaos-test-singlenodeheight: - stage: chaos - rules: - # .build-refs with chaos - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - - if: $CI_COMMIT_MESSAGE =~ /\[chaos:(basic|medium|large)\]/ && $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # i.e add [chaos:basic] in commit message to trigger - image: paritypr/simnet:latest - needs: - - job: build-chaos-docker - tags: - - parity-simnet - variables: - <<: *default-vars - PRODUCT: substrate - DOCKERFILE: $PRODUCT.Dockerfile - CONTAINER_IMAGE: paritypr/$PRODUCT - KEEP_NAMESPACE: 0 - NAMESPACE: "substrate-ci-${CI_COMMIT_SHORT_SHA}-${CI_PIPELINE_ID}" - VERSION: "ci-${CI_COMMIT_SHORT_SHA}" - interruptible: true - environment: - name: parity-simnet - script: - - simnet spawn dev -i $CONTAINER_IMAGE:$VERSION - - simnet singlenodeheight -h 30 - after_script: - - simnet clean - #### stage: publish .build-push-docker-image: &build-push-docker-image <<: *build-refs <<: *kubernetes-build - image: docker:stable - services: - - docker:dind + image: quay.io/buildah/stable variables: &docker-build-vars <<: *default-vars - DOCKER_HOST: tcp://localhost:2375 - DOCKER_DRIVER: overlay2 GIT_STRATEGY: none DOCKERFILE: $PRODUCT.Dockerfile - CONTAINER_IMAGE: parity/$PRODUCT + IMAGE_NAME: docker.io/parity/$PRODUCT before_script: - - test "$Docker_Hub_User_Parity" -a "$Docker_Hub_Pass_Parity" - || ( echo "no docker credentials provided"; exit 1 ) - - docker login -u "$Docker_Hub_User_Parity" -p "$Docker_Hub_Pass_Parity" - - docker info - script: - cd ./artifacts/$PRODUCT/ - VERSION="$(cat ./VERSION)" - echo "${PRODUCT} version = ${VERSION}" - test -z "${VERSION}" && exit 1 - - docker build - --build-arg VCS_REF="${CI_COMMIT_SHA}" - --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" - --tag $CONTAINER_IMAGE:$VERSION - --tag $CONTAINER_IMAGE:latest - --file $DOCKERFILE . - - docker push $CONTAINER_IMAGE:$VERSION - - docker push $CONTAINER_IMAGE:latest + script: + - test "$Docker_Hub_User_Parity" -a "$Docker_Hub_Pass_Parity" || + ( echo "no docker credentials provided"; exit 1 ) + - buildah bud + --format=docker + --build-arg VCS_REF="${CI_COMMIT_SHA}" + --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" + --tag "$IMAGE_NAME:$VERSION" + --tag "$IMAGE_NAME:latest" + --file "$DOCKERFILE" . 
+ - echo "$Docker_Hub_Pass_Parity" | + buildah login --username "$Docker_Hub_User_Parity" --password-stdin docker.io + - buildah info + - buildah push --format=v2s2 "$IMAGE_NAME:$VERSION" + - buildah push --format=v2s2 "$IMAGE_NAME:latest" + - buildah logout "$IMAGE_NAME" publish-docker-substrate: stage: publish @@ -632,7 +530,6 @@ publish-docker-substrate: <<: *docker-build-vars PRODUCT: substrate after_script: - - docker logout # only VERSION information is needed for the deployment - find ./artifacts/ -depth -not -name VERSION -type f -delete @@ -645,8 +542,6 @@ publish-docker-subkey: variables: <<: *docker-build-vars PRODUCT: subkey - after_script: - - docker logout publish-s3-release: stage: publish @@ -677,6 +572,8 @@ publish-s3-doc: needs: - job: build-rust-doc artifacts: true + - job: build-linux-substrate + artifacts: false <<: *build-refs <<: *kubernetes-build variables: @@ -699,7 +596,7 @@ publish-draft-release: image: paritytech/tools:latest rules: - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+\.[0-9]+.*$/ + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 script: - ./.maintain/gitlab/publish_draft_release.sh allow_failure: true @@ -709,14 +606,19 @@ publish-to-crates-io: <<: *docker-env rules: - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+\.[0-9]+.*$/ + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 script: - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS} - cargo unleash em-dragons --no-check --owner github:paritytech:core-devs ${CARGO_UNLEASH_PKG_DEF} allow_failure: true -deploy-kubernetes-alerting-rules: +#### stage: deploy + +deploy-prometheus-alerting-rules: stage: deploy + needs: + - job: test-prometheus-alerting-rules + artifacts: false interruptible: true retry: 1 tags: @@ -738,21 +640,29 @@ deploy-kubernetes-alerting-rules: - .gitlab-ci.yml - .maintain/monitoring/**/* +trigger-simnet: + stage: deploy + rules: + # this job runs only on nightly pipeline with the mentioned variable, against `master` branch + - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly" + needs: + - job: publish-docker-substrate + artifacts: false + trigger: + project: parity/simnet + branch: master + strategy: depend + .validator-deploy: &validator-deploy - stage: flaming-fir + stage: deploy rules: - # .build-refs, but manual - - if: $CI_COMMIT_REF_NAME == "master" - when: manual - - if: $CI_PIPELINE_SOURCE == "web" - when: manual - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 - when: manual + # this job runs only on nightly pipeline with the mentioned variable, against `master` branch + - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly" needs: # script will fail if there is no artifacts/substrate/VERSION - job: publish-docker-substrate artifacts: true - image: parity/azure-ansible:v1 + image: parity/azure-ansible:v2 allow_failure: true interruptible: true tags: @@ -777,14 +687,3 @@ validator 4 4: <<: *validator-deploy script: - ./.maintain/flamingfir-deploy.sh flamingfir-validator4 - -#### stage: .post - -check-labels: - stage: .post - image: paritytech/tools:latest - <<: *kubernetes-build - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - script: - - ./.maintain/gitlab/check_labels.sh diff --git a/.maintain/chaostest/.eslintignore b/.maintain/chaostest/.eslintignore deleted file mode 100644 index 3c3629e647f5d..0000000000000 --- a/.maintain/chaostest/.eslintignore +++ /dev/null @@ -1 +0,0 @@ -node_modules diff --git a/.maintain/chaostest/.eslintrc.json b/.maintain/chaostest/.eslintrc.json deleted file mode 100644 index 43e483a80b2ea..0000000000000 --- a/.maintain/chaostest/.eslintrc.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "env": { - "node": true, - "commonjs": true, - "es6": true - }, - "extends": [ - "standard" - ], - "globals": { - "Atomics": "readonly", - "SharedArrayBuffer": "readonly" - }, - "parserOptions": { - "ecmaVersion": 2018 - }, - "rules": { - } -} diff --git a/.maintain/chaostest/.gitignore b/.maintain/chaostest/.gitignore deleted file mode 100644 index ef9e9d1e696e4..0000000000000 --- a/.maintain/chaostest/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -*-debug.log -*-error.log -/.nyc_output -/dist -/tmp -/log -.DS_Store -.editorconfig -yarn.lock -node_modules -/src/config/config.json diff --git a/.maintain/chaostest/README.md b/.maintain/chaostest/README.md deleted file mode 100644 index dc3d07b57905e..0000000000000 --- a/.maintain/chaostest/README.md +++ /dev/null @@ -1,89 +0,0 @@ -chaostest -========= - -A cli for chaos testing on substrate - -[![oclif](https://img.shields.io/badge/cli-oclif-brightgreen.svg)](https://oclif.io) -[![Version](https://img.shields.io/npm/v/chaostest.svg)](https://npmjs.org/package/chaostest) -[![Downloads/week](https://img.shields.io/npm/dw/chaostest.svg)](https://npmjs.org/package/chaostest) - - -* [Usage](#usage) -* [Commands](#commands) - -# Usage - -```sh-session -$ npm install -g chaostest // yarn add global chaostest -$ chaostest COMMAND -running command... -$ chaostest (-v|--version|version) -chaostest/0.0.0 darwin-x64 node-v8.16.0 -$ chaostest --help [COMMAND] -USAGE - $ chaostest COMMAND -... -``` - -# Commands - -* [`chaostest spawn`](#chaostest-spawn) -* [`chaostest singlenodeheight`](#chaostest-singlenodeheight) -* [`chaostest clean`](#chaostest-clean) - -## `chaostest spawn` - -Spawn a testnet based on your local k8s configuration. Could be either a dev node, a two node alicebob chain or a customized chain with various validators/fullnodes. 
- -``` -USAGE - $ chaostest spawn [ARGUMENTS] [FLAGS] - -Arguments - dev, a single fullnode in --dev mode - alicebob, a two nodes private chain with Alice as bootnode and Bob as validator - [chainName], a customized chain deployed with -v numbers of validators and -n numbers of fullnodes - -Flags - --image, -i, the image tag of the certain substrate version you want to deploy - --port, -p, the port to expose when image is deployed in a pod - --namespace, the desired namespace to deploy on - --validator, -v, the number of substrate validators to deploy - --node, -n, the number of full nodes, if not set but exists, default to 1 - -DESCRIPTION - ... - Extra documentation goes here -``` - -_See code: [src/commands/spawn/index.js](https://github.com/paritytech/substrate/blob/harry/chaostest-init/.maintain/chaostest/src/commands/spawn/index.js)_ - -## `chaostest singlenodeheight` - -Test against a fullnode on --dev mode to check if it can successfully produce blocks to a certain height. - -``` -USAGE - $ chaostest singlenodeheight [FLAGS] - -FLAGS - -h , the desired height of blocks to check if reachable, this only works with integers smaller than 2^6 - -t, the wait time out before it halts the polling -``` - -_See code: [src/commands/singlenodeheight/index.js](https://github.com/paritytech/substrate/blob/harry/chaostest-init/.maintain/chaostest/src/commands/singlenodeheight/index.js)_ - -## `chaostest clean` - -Clean up the k8s deployment by namespace. - -``` -USAGE - $ chaostest clean [FLAGS] - -FLAGS - -n , the desired namespace to delete on your k8s cluster -``` - -_See code: [src/commands/clean/index.js](https://github.com/paritytech/substrate/blob/harry/chaostest-init/.maintain/chaostest/src/commands/clean/index.js)_ - diff --git a/.maintain/chaostest/bin/run b/.maintain/chaostest/bin/run deleted file mode 100755 index 30b14e177331d..0000000000000 --- a/.maintain/chaostest/bin/run +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env node - -require('@oclif/command').run() -.then(require('@oclif/command/flush')) -.catch(require('@oclif/errors/handle')) diff --git a/.maintain/chaostest/bin/run.cmd b/.maintain/chaostest/bin/run.cmd deleted file mode 100644 index 968fc30758e68..0000000000000 --- a/.maintain/chaostest/bin/run.cmd +++ /dev/null @@ -1,3 +0,0 @@ -@echo off - -node "%~dp0\run" %* diff --git a/.maintain/chaostest/package-lock.json b/.maintain/chaostest/package-lock.json deleted file mode 100644 index 09468e12fb4f9..0000000000000 --- a/.maintain/chaostest/package-lock.json +++ /dev/null @@ -1,5950 +0,0 @@ -{ - "name": "chaostest", - "version": "0.0.0", - "lockfileVersion": 1, - "requires": true, - "dependencies": { - "@babel/code-frame": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.8.3.tgz", - "integrity": "sha512-a9gxpmdXtZEInkCSHUJDLHZVBgb1QS0jhss4cPP93EW7s+uC5bikET2twEF3KV+7rDblJcmNvTR7VJejqd2C2g==", - "dev": true, - "requires": { - "@babel/highlight": "^7.8.3" - } - }, - "@babel/generator": { - "version": "7.9.6", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.9.6.tgz", - "integrity": "sha512-+htwWKJbH2bL72HRluF8zumBxzuX0ZZUFl3JLNyoUjM/Ho8wnVpPXM6aUz8cfKDqQ/h7zHqKt4xzJteUosckqQ==", - "dev": true, - "requires": { - "@babel/types": "^7.9.6", - "jsesc": "^2.5.1", - "lodash": "^4.17.13", - "source-map": "^0.5.0" - } - }, - "@babel/helper-function-name": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.9.5.tgz", - "integrity": 
"sha512-JVcQZeXM59Cd1qanDUxv9fgJpt3NeKUaqBqUEvfmQ+BCOKq2xUgaWZW2hr0dkbyJgezYuplEoh5knmrnS68efw==", - "dev": true, - "requires": { - "@babel/helper-get-function-arity": "^7.8.3", - "@babel/template": "^7.8.3", - "@babel/types": "^7.9.5" - } - }, - "@babel/helper-get-function-arity": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.8.3.tgz", - "integrity": "sha512-FVDR+Gd9iLjUMY1fzE2SR0IuaJToR4RkCDARVfsBBPSP53GEqSFjD8gNyxg246VUyc/ALRxFaAK8rVG7UT7xRA==", - "dev": true, - "requires": { - "@babel/types": "^7.8.3" - } - }, - "@babel/helper-split-export-declaration": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.8.3.tgz", - "integrity": "sha512-3x3yOeyBhW851hroze7ElzdkeRXQYQbFIb7gLK1WQYsw2GWDay5gAJNw1sWJ0VFP6z5J1whqeXH/WCdCjZv6dA==", - "dev": true, - "requires": { - "@babel/types": "^7.8.3" - } - }, - "@babel/helper-validator-identifier": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.9.5.tgz", - "integrity": "sha512-/8arLKUFq882w4tWGj9JYzRpAlZgiWUJ+dtteNTDqrRBz9Iguck9Rn3ykuBDoUwh2TO4tSAJlrxDUOXWklJe4g==", - "dev": true - }, - "@babel/highlight": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.9.0.tgz", - "integrity": "sha512-lJZPilxX7Op3Nv/2cvFdnlepPXDxi29wxteT57Q965oc5R9v86ztx0jfxVrTcBk8C2kcPkkDa2Z4T3ZsPPVWsQ==", - "dev": true, - "requires": { - "@babel/helper-validator-identifier": "^7.9.0", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" - } - }, - "@babel/parser": { - "version": "7.9.6", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.9.6.tgz", - "integrity": "sha512-AoeIEJn8vt+d/6+PXDRPaksYhnlbMIiejioBZvvMQsOjW/JYK6k/0dKnvvP3EhK5GfMBWDPtrxRtegWdAcdq9Q==", - "dev": true - }, - "@babel/runtime": { - "version": "7.9.6", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.9.6.tgz", - "integrity": "sha512-64AF1xY3OAkFHqOb9s4jpgk1Mm5vDZ4L3acHvAml+53nO1XbXLuDodsVpO4OIUsmemlUHMxNdYMNJmsvOwLrvQ==", - "requires": { - "regenerator-runtime": "^0.13.4" - } - }, - "@babel/template": { - "version": "7.8.6", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.8.6.tgz", - "integrity": "sha512-zbMsPMy/v0PWFZEhQJ66bqjhH+z0JgMoBWuikXybgG3Gkd/3t5oQ1Rw2WQhnSrsOmsKXnZOx15tkC4qON/+JPg==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.8.3", - "@babel/parser": "^7.8.6", - "@babel/types": "^7.8.6" - } - }, - "@babel/traverse": { - "version": "7.9.6", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.9.6.tgz", - "integrity": "sha512-b3rAHSjbxy6VEAvlxM8OV/0X4XrG72zoxme6q1MOoe2vd0bEc+TwayhuC1+Dfgqh1QEG+pj7atQqvUprHIccsg==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.8.3", - "@babel/generator": "^7.9.6", - "@babel/helper-function-name": "^7.9.5", - "@babel/helper-split-export-declaration": "^7.8.3", - "@babel/parser": "^7.9.6", - "@babel/types": "^7.9.6", - "debug": "^4.1.0", - "globals": "^11.1.0", - "lodash": "^4.17.13" - } - }, - "@babel/types": { - "version": "7.9.6", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.9.6.tgz", - "integrity": "sha512-qxXzvBO//jO9ZnoasKF1uJzHd2+M6Q2ZPIVfnFps8JJvXy0ZBbwbNOmE6SGIY5XOY6d1Bo5lb9d9RJ8nv3WSeA==", - "dev": true, - "requires": { - "@babel/helper-validator-identifier": "^7.9.5", - "lodash": "^4.17.13", - "to-fast-properties": "^2.0.0" - } - }, - 
"@kubernetes/client-node": { - "version": "0.11.2", - "resolved": "https://registry.npmjs.org/@kubernetes/client-node/-/client-node-0.11.2.tgz", - "integrity": "sha512-Uhwd2y2qCvugICnHRC5h2MT5vw0a1dJPVVltVwmkeMuyGTPBccsTtpTcSfSLitwOrh4yr+9wG5bRcMdgeRjYPw==", - "requires": { - "@types/js-yaml": "^3.12.1", - "@types/node": "^10.12.0", - "@types/request": "^2.47.1", - "@types/underscore": "^1.8.9", - "@types/ws": "^6.0.1", - "byline": "^5.0.0", - "execa": "1.0.0", - "isomorphic-ws": "^4.0.1", - "js-yaml": "^3.13.1", - "jsonpath-plus": "^0.19.0", - "openid-client": "2.5.0", - "request": "^2.88.0", - "rfc4648": "^1.3.0", - "shelljs": "^0.8.2", - "tslib": "^1.9.3", - "underscore": "^1.9.1", - "ws": "^6.1.0" - }, - "dependencies": { - "jsonpath-plus": { - "version": "0.19.0", - "resolved": "https://registry.npmjs.org/jsonpath-plus/-/jsonpath-plus-0.19.0.tgz", - "integrity": "sha512-GSVwsrzW9LsA5lzsqe4CkuZ9wp+kxBb2GwNniaWzI2YFn5Ig42rSW8ZxVpWXaAfakXNrx5pgY5AbQq7kzX29kg==" - } - } - }, - "@nodelib/fs.scandir": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.3.tgz", - "integrity": "sha512-eGmwYQn3gxo4r7jdQnkrrN6bY478C3P+a/y72IJukF8LjB6ZHeB3c+Ehacj3sYeSmUXGlnA67/PmbM9CVwL7Dw==", - "dev": true, - "requires": { - "@nodelib/fs.stat": "2.0.3", - "run-parallel": "^1.1.9" - } - }, - "@nodelib/fs.stat": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.3.tgz", - "integrity": "sha512-bQBFruR2TAwoevBEd/NWMoAAtNGzTRgdrqnYCc7dhzfoNvqPzLyqlEQnzZ3kVnNrSp25iyxE00/3h2fqGAGArA==", - "dev": true - }, - "@nodelib/fs.walk": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.4.tgz", - "integrity": "sha512-1V9XOY4rDW0rehzbrcqAmHnz8e7SKvX27gh8Gt2WgB0+pdzdiLV83p72kZPU+jvMbS1qU5mauP2iOvO8rhmurQ==", - "dev": true, - "requires": { - "@nodelib/fs.scandir": "2.1.3", - "fastq": "^1.6.0" - } - }, - "@oclif/command": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/@oclif/command/-/command-1.6.1.tgz", - "integrity": "sha512-pvmMmfGn+zm4e4RwVw63mg9sIaqKqmVsFbImQoUrCO/43UmWzoSHWNXKdgEGigOezWrkZfFucaeZcSbp149OWg==", - "requires": { - "@oclif/config": "^1.15.1", - "@oclif/errors": "^1.2.2", - "@oclif/parser": "^3.8.3", - "@oclif/plugin-help": "^3", - "debug": "^4.1.1", - "semver": "^5.6.0" - }, - "dependencies": { - "@oclif/plugin-help": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@oclif/plugin-help/-/plugin-help-3.0.1.tgz", - "integrity": "sha512-Q1OITeUBkkydPf6r5qX75KgE9capr1mNrfHtfD7gkVXmqoTndrbc++z4KfAYNf5nhTCY7N9l52sjbF6BrSGu9w==", - "requires": { - "@oclif/command": "^1.5.20", - "@oclif/config": "^1.15.1", - "chalk": "^2.4.1", - "indent-string": "^4.0.0", - "lodash.template": "^4.4.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0", - "widest-line": "^2.0.1", - "wrap-ansi": "^4.0.0" - } - }, - "indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==" - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - } - } - }, - "@oclif/config": { - "version": "1.15.1", - 
"resolved": "https://registry.npmjs.org/@oclif/config/-/config-1.15.1.tgz", - "integrity": "sha512-GdyHpEZuWlfU8GSaZoiywtfVBsPcfYn1KuSLT1JTfvZGpPG6vShcGr24YZ3HG2jXUFlIuAqDcYlTzOrqOdTPNQ==", - "requires": { - "@oclif/errors": "^1.0.0", - "@oclif/parser": "^3.8.0", - "debug": "^4.1.1", - "tslib": "^1.9.3" - } - }, - "@oclif/dev-cli": { - "version": "1.22.2", - "resolved": "https://registry.npmjs.org/@oclif/dev-cli/-/dev-cli-1.22.2.tgz", - "integrity": "sha512-c7633R37RxrQIpwqPKxjNRm6/jb1yuG8fd16hmNz9Nw+/MUhEtQtKHSCe9ScH8n5M06l6LEo4ldk9LEGtpaWwA==", - "dev": true, - "requires": { - "@oclif/command": "^1.5.13", - "@oclif/config": "^1.12.12", - "@oclif/errors": "^1.2.2", - "@oclif/plugin-help": "^2.1.6", - "cli-ux": "^5.2.1", - "debug": "^4.1.1", - "fs-extra": "^7.0.1", - "github-slugger": "^1.2.1", - "lodash": "^4.17.11", - "normalize-package-data": "^2.5.0", - "qqjs": "^0.3.10", - "tslib": "^1.9.3" - } - }, - "@oclif/errors": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@oclif/errors/-/errors-1.2.2.tgz", - "integrity": "sha512-Eq8BFuJUQcbAPVofDxwdE0bL14inIiwt5EaKRVY9ZDIG11jwdXZqiQEECJx0VfnLyUZdYfRd/znDI/MytdJoKg==", - "requires": { - "clean-stack": "^1.3.0", - "fs-extra": "^7.0.0", - "indent-string": "^3.2.0", - "strip-ansi": "^5.0.0", - "wrap-ansi": "^4.0.0" - } - }, - "@oclif/linewrap": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@oclif/linewrap/-/linewrap-1.0.0.tgz", - "integrity": "sha512-Ups2dShK52xXa8w6iBWLgcjPJWjais6KPJQq3gQ/88AY6BXoTX+MIGFPrWQO1KLMiQfoTpcLnUwloN4brrVUHw==" - }, - "@oclif/parser": { - "version": "3.8.5", - "resolved": "https://registry.npmjs.org/@oclif/parser/-/parser-3.8.5.tgz", - "integrity": "sha512-yojzeEfmSxjjkAvMRj0KzspXlMjCfBzNRPkWw8ZwOSoNWoJn+OCS/m/S+yfV6BvAM4u2lTzX9Y5rCbrFIgkJLg==", - "requires": { - "@oclif/errors": "^1.2.2", - "@oclif/linewrap": "^1.0.0", - "chalk": "^2.4.2", - "tslib": "^1.9.3" - } - }, - "@oclif/plugin-help": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/@oclif/plugin-help/-/plugin-help-2.2.3.tgz", - "integrity": "sha512-bGHUdo5e7DjPJ0vTeRBMIrfqTRDBfyR5w0MP41u0n3r7YG5p14lvMmiCXxi6WDaP2Hw5nqx3PnkAIntCKZZN7g==", - "requires": { - "@oclif/command": "^1.5.13", - "chalk": "^2.4.1", - "indent-string": "^4.0.0", - "lodash.template": "^4.4.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0", - "widest-line": "^2.0.1", - "wrap-ansi": "^4.0.0" - }, - "dependencies": { - "indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==" - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - } - } - }, - "@oclif/screen": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@oclif/screen/-/screen-1.0.4.tgz", - "integrity": "sha512-60CHpq+eqnTxLZQ4PGHYNwUX572hgpMHGPtTWMjdTMsAvlm69lZV/4ly6O3sAYkomo4NggGcomrDpBe34rxUqw==", - "dev": true - }, - "@oclif/test": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/@oclif/test/-/test-1.2.6.tgz", - "integrity": "sha512-8BQm0VFwTf/JpDnI3x6Lbp3S4RRUvQcv8WalKm82+7FNEylWMAXFNgBuzG65cNPj11J2jhlVo0gOWGF6hbiaJQ==", - "dev": true, - "requires": { - "fancy-test": 
"^1.4.3" - } - }, - "@polkadot/api": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/api/-/api-0.95.2.tgz", - "integrity": "sha512-SrYiEE9T+AmCx18NyhEk5l/7yPvVqogiz7rmW8YGlOZ89OEPHe2dOTaD5tZJ5daKXEkXFsqPPtwemCv2OZ2F1g==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/api-derive": "^0.95.2", - "@polkadot/api-metadata": "^0.95.2", - "@polkadot/keyring": "^1.6.1", - "@polkadot/rpc-core": "^0.95.2", - "@polkadot/rpc-provider": "^0.95.2", - "@polkadot/types": "^0.95.2", - "@polkadot/util-crypto": "^1.6.1" - } - }, - "@polkadot/api-derive": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/api-derive/-/api-derive-0.95.2.tgz", - "integrity": "sha512-IScOMoUnrs/TCPk2zZZWUfw1EfV718HuFbIRFVg11PiG/uYQ+knNpr9cG/auRWelDMO0ef7eI+YOpf9+gV3EZw==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/api": "^0.95.2", - "@polkadot/types": "^0.95.2" - } - }, - "@polkadot/api-metadata": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/api-metadata/-/api-metadata-0.95.2.tgz", - "integrity": "sha512-RyHr6o8Qdi0k1cTJj11AqZ3MFoPbqUK37RMpFH8vK6VHlZRlpqaZsCctWMEiOXQC2CtTnE5CIoQH11AKeIK+jw==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/types": "^0.95.2", - "@polkadot/util": "^1.6.1", - "@polkadot/util-crypto": "^1.6.1" - } - }, - "@polkadot/jsonrpc": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/jsonrpc/-/jsonrpc-0.95.2.tgz", - "integrity": "sha512-U8cx5MuhWPRcuosSHv/Qw4OmlgSk410oTQtYvHAFDoHuPDcYXTBcCJ0e31cCZFBkaed+GTelkex9EPnHFi0x1g==", - "requires": { - "@babel/runtime": "^7.6.3" - } - }, - "@polkadot/keyring": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/keyring/-/keyring-1.8.1.tgz", - "integrity": "sha512-KeDbfP8biY3bXEhMv1ANp9d3kCuXj2oxseuDK0jvxRo7CehVME9UwAMGQK3Y9NCUuYWd+xTO2To0ZOqR7hdmuQ==", - "requires": { - "@babel/runtime": "^7.7.7", - "@polkadot/util": "^1.8.1", - "@polkadot/util-crypto": "^1.8.1" - } - }, - "@polkadot/rpc-core": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/rpc-core/-/rpc-core-0.95.2.tgz", - "integrity": "sha512-IjuzYfNSBWalzingkvpGdO9lZH6s5wFc5lWCINFDP/MSlnLfKzufzR0JeSiVCluraoohtUB/INVuBujDziZPzg==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/jsonrpc": "^0.95.2", - "@polkadot/rpc-provider": "^0.95.2", - "@polkadot/types": "^0.95.2", - "@polkadot/util": "^1.6.1", - "rxjs": "^6.5.3" - } - }, - "@polkadot/rpc-provider": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/rpc-provider/-/rpc-provider-0.95.2.tgz", - "integrity": "sha512-+vSoI9mdHPnjL7jK666+HLJ21Ymxo8GHdO72mI1A3xGO7wBmjKbUMHEYUtRwxg7DGF4mSZ/HJogoSU4i9smzpw==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/api-metadata": "^0.95.2", - "@polkadot/util": "^1.6.1", - "@polkadot/util-crypto": "^1.6.1", - "@types/nock": "^11.1.0", - "eventemitter3": "^4.0.0", - "isomorphic-fetch": "^2.2.1", - "websocket": "^1.0.30" - } - }, - "@polkadot/types": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/types/-/types-0.95.2.tgz", - "integrity": "sha512-YiZbLgJ82rmgwbsYWEL8vtYqO1n1xEPxD5C8D0dmZQcwn9iSUibIqeij1xfd8y2ZyUmMW3YhdoJR6a8Ah6g3yw==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/util": "^1.6.1", - "@polkadot/util-crypto": "^1.6.1", - "@types/memoizee": "^0.4.3", - "memoizee": "^0.4.14" - } - }, - "@polkadot/util": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/util/-/util-1.8.1.tgz", - "integrity": 
"sha512-sFpr+JLCG9d+epjboXsmJ1qcKa96r8ZYzXmVo8+aPzI/9jKKyez6Unox/dnfnpKppZB2nJuLcsxQm6nocp2Caw==", - "requires": { - "@babel/runtime": "^7.7.7", - "@types/bn.js": "^4.11.6", - "bn.js": "^4.11.8", - "camelcase": "^5.3.1", - "chalk": "^3.0.0", - "ip-regex": "^4.1.0", - "moment": "^2.24.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "supports-color": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", - "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "@polkadot/util-crypto": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/util-crypto/-/util-crypto-1.8.1.tgz", - "integrity": "sha512-ypUs10hV1HPvYc0ZsEu+LTGSEh0rkr0as/FUh7+Z9v3Bxibn3aO+EOxJPQuDbZZ59FSMRmc9SeOSa0wn9ddrnw==", - "requires": { - "@babel/runtime": "^7.7.7", - "@polkadot/util": "^1.8.1", - "@polkadot/wasm-crypto": "^0.14.1", - "@types/bip39": "^2.4.2", - "@types/bs58": "^4.0.0", - "@types/pbkdf2": "^3.0.0", - "@types/secp256k1": "^3.5.0", - "@types/xxhashjs": "^0.2.1", - "base-x": "3.0.5", - "bip39": "^2.5.0", - "blakejs": "^1.1.0", - "bs58": "^4.0.1", - "js-sha3": "^0.8.0", - "secp256k1": "^3.8.0", - "tweetnacl": "^1.0.1", - "xxhashjs": "^0.2.2" - }, - "dependencies": { - "secp256k1": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/secp256k1/-/secp256k1-3.8.0.tgz", - "integrity": "sha512-k5ke5avRZbtl9Tqx/SA7CbY3NF6Ro+Sj9cZxezFzuBlLDmyqPiL8hJJ+EmzD8Ig4LUDByHJ3/iPOVoRixs/hmw==", - "requires": { - "bindings": "^1.5.0", - "bip66": "^1.1.5", - "bn.js": "^4.11.8", - "create-hash": "^1.2.0", - "drbg.js": "^1.0.1", - "elliptic": "^6.5.2", - "nan": "^2.14.0", - "safe-buffer": "^5.1.2" - } - }, - "tweetnacl": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-1.0.3.tgz", - "integrity": "sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==" - } - } - }, - "@polkadot/wasm-crypto": { - "version": "0.14.1", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto/-/wasm-crypto-0.14.1.tgz", - "integrity": 
"sha512-Xng7L2Z8TNZa/5g6pot4O06Jf0ohQRZdvfl8eQL+E/L2mcqJYC1IjkMxJBSBuQEV7hisWzh9mHOy5WCcgPk29Q==" - }, - "@sindresorhus/is": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.7.0.tgz", - "integrity": "sha512-ONhaKPIufzzrlNbqtWFFd+jlnemX6lJAgq9ZeiZtS7I1PIf/la7CW4m83rTXRnVnsMbW2k56pGYu7AUFJD9Pow==" - }, - "@types/bip39": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/@types/bip39/-/bip39-2.4.2.tgz", - "integrity": "sha512-Vo9lqOIRq8uoIzEVrV87ZvcIM0PN9t0K3oYZ/CS61fIYKCBdOIM7mlWzXuRvSXrDtVa1uUO2w1cdfufxTC0bzg==", - "requires": { - "@types/node": "*" - } - }, - "@types/bn.js": { - "version": "4.11.6", - "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-4.11.6.tgz", - "integrity": "sha512-pqr857jrp2kPuO9uRjZ3PwnJTjoQy+fcdxvBTvHm6dkmEL9q+hDD/2j/0ELOBPtPnS8LjCX0gI9nbl8lVkadpg==", - "requires": { - "@types/node": "*" - } - }, - "@types/bs58": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@types/bs58/-/bs58-4.0.1.tgz", - "integrity": "sha512-yfAgiWgVLjFCmRv8zAcOIHywYATEwiTVccTLnRp6UxTNavT55M9d/uhK3T03St/+8/z/wW+CRjGKUNmEqoHHCA==", - "requires": { - "base-x": "^3.0.6" - }, - "dependencies": { - "base-x": { - "version": "3.0.8", - "resolved": "https://registry.npmjs.org/base-x/-/base-x-3.0.8.tgz", - "integrity": "sha512-Rl/1AWP4J/zRrk54hhlxH4drNxPJXYUaKffODVI53/dAsV4t9fBxyxYKAVPU1XBHxYwOWP9h9H0hM2MVw4YfJA==", - "requires": { - "safe-buffer": "^5.0.1" - } - } - } - }, - "@types/caseless": { - "version": "0.12.2", - "resolved": "https://registry.npmjs.org/@types/caseless/-/caseless-0.12.2.tgz", - "integrity": "sha512-6ckxMjBBD8URvjB6J3NcnuAn5Pkl7t3TizAg+xdlzzQGSPSmBcXf8KoIH0ua/i+tio+ZRUHEXp0HEmvaR4kt0w==" - }, - "@types/chai": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.2.11.tgz", - "integrity": "sha512-t7uW6eFafjO+qJ3BIV2gGUyZs27egcNRkUdalkud+Qa3+kg//f129iuOFivHDXQ+vnU3fDXuwgv0cqMCbcE8sw==", - "dev": true - }, - "@types/color-name": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", - "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==" - }, - "@types/events": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/events/-/events-3.0.0.tgz", - "integrity": "sha512-EaObqwIvayI5a8dCzhFrjKzVwKLxjoG9T6Ppd5CEo07LRKfQ8Yokw54r5+Wq7FaBQ+yXRvQAYPrHwya1/UFt9g==", - "dev": true - }, - "@types/glob": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.1.tgz", - "integrity": "sha512-1Bh06cbWJUHMC97acuD6UMG29nMt0Aqz1vF3guLfG+kHHJhy3AyohZFFxYk2f7Q1SQIrNwvncxAE0N/9s70F2w==", - "dev": true, - "requires": { - "@types/events": "*", - "@types/minimatch": "*", - "@types/node": "*" - } - }, - "@types/js-yaml": { - "version": "3.12.4", - "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-3.12.4.tgz", - "integrity": "sha512-fYMgzN+9e28R81weVN49inn/u798ruU91En1ZnGvSZzCRc5jXx9B2EDhlRaWmcO1RIxFHL8AajRXzxDuJu93+A==" - }, - "@types/lodash": { - "version": "4.14.152", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.152.tgz", - "integrity": "sha512-Vwf9YF2x1GE3WNeUMjT5bTHa2DqgUo87ocdgTScupY2JclZ5Nn7W2RLM/N0+oreexUk8uaVugR81NnTY/jNNXg==", - "dev": true - }, - "@types/memoizee": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/@types/memoizee/-/memoizee-0.4.4.tgz", - "integrity": "sha512-c9+1g6+6vEqcw5UuM0RbfQV0mssmZcoG9+hNC5ptDCsv4G+XJW1Z4pE13wV5zbc9e0+YrDydALBTiD3nWG1a3g==" - }, - 
"@types/minimatch": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.3.tgz", - "integrity": "sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA==", - "dev": true - }, - "@types/mocha": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-7.0.2.tgz", - "integrity": "sha512-ZvO2tAcjmMi8V/5Z3JsyofMe3hasRcaw88cto5etSVMwVQfeivGAlEYmaQgceUSVYFofVjT+ioHsATjdWcFt1w==", - "dev": true - }, - "@types/nock": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/@types/nock/-/nock-11.1.0.tgz", - "integrity": "sha512-jI/ewavBQ7X5178262JQR0ewicPAcJhXS/iFaNJl0VHLfyosZ/kwSrsa6VNQNSO8i9d8SqdRgOtZSOKJ/+iNMw==", - "requires": { - "nock": "*" - } - }, - "@types/node": { - "version": "10.17.24", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.24.tgz", - "integrity": "sha512-5SCfvCxV74kzR3uWgTYiGxrd69TbT1I6+cMx1A5kEly/IVveJBimtAMlXiEyVFn5DvUFewQWxOOiJhlxeQwxgA==" - }, - "@types/pbkdf2": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/pbkdf2/-/pbkdf2-3.0.0.tgz", - "integrity": "sha512-6J6MHaAlBJC/eVMy9jOwj9oHaprfutukfW/Dyt0NEnpQ/6HN6YQrpvLwzWdWDeWZIdenjGHlbYDzyEODO5Z+2Q==", - "requires": { - "@types/node": "*" - } - }, - "@types/request": { - "version": "2.48.5", - "resolved": "https://registry.npmjs.org/@types/request/-/request-2.48.5.tgz", - "integrity": "sha512-/LO7xRVnL3DxJ1WkPGDQrp4VTV1reX9RkC85mJ+Qzykj2Bdw+mG15aAfDahc76HtknjzE16SX/Yddn6MxVbmGQ==", - "requires": { - "@types/caseless": "*", - "@types/node": "*", - "@types/tough-cookie": "*", - "form-data": "^2.5.0" - } - }, - "@types/secp256k1": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/@types/secp256k1/-/secp256k1-3.5.3.tgz", - "integrity": "sha512-NGcsPDR0P+Q71O63e2ayshmiZGAwCOa/cLJzOIuhOiDvmbvrCIiVtEpqdCJGogG92Bnr6tw/6lqVBsRMEl15OQ==", - "requires": { - "@types/node": "*" - } - }, - "@types/sinon": { - "version": "9.0.4", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-9.0.4.tgz", - "integrity": "sha512-sJmb32asJZY6Z2u09bl0G2wglSxDlROlAejCjsnor+LzBMz17gu8IU7vKC/vWDnv9zEq2wqADHVXFjf4eE8Gdw==", - "dev": true, - "requires": { - "@types/sinonjs__fake-timers": "*" - } - }, - "@types/sinonjs__fake-timers": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-6.0.1.tgz", - "integrity": "sha512-yYezQwGWty8ziyYLdZjwxyMb0CZR49h8JALHGrxjQHWlqGgc8kLdHEgWrgL0uZ29DMvEVBDnHU2Wg36zKSIUtA==", - "dev": true - }, - "@types/tough-cookie": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.0.tgz", - "integrity": "sha512-I99sngh224D0M7XgW1s120zxCt3VYQ3IQsuw3P3jbq5GG4yc79+ZjyKznyOGIQrflfylLgcfekeZW/vk0yng6A==" - }, - "@types/underscore": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/@types/underscore/-/underscore-1.10.0.tgz", - "integrity": "sha512-ZAbqul7QAKpM2h1PFGa5ETN27ulmqtj0QviYHasw9LffvXZvVHuraOx/FOsIPPDNGZN0Qo1nASxxSfMYOtSoCw==" - }, - "@types/ws": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-6.0.4.tgz", - "integrity": "sha512-PpPrX7SZW9re6+Ha8ojZG4Se8AZXgf0GK6zmfqEuCsY49LFDNXO3SByp44X3dFEqtB73lkCDAdUazhAjVPiNwg==", - "requires": { - "@types/node": "*" - } - }, - "@types/xxhashjs": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/@types/xxhashjs/-/xxhashjs-0.2.2.tgz", - "integrity": 
"sha512-+hlk/W1kgnZn0vR22XNhxHk/qIRQYF54i0UTF2MwBAPd0e7xSy+jKOJwSwTdRQrNnOMRVv+vsh8ITV0uyhp2yg==", - "requires": { - "@types/node": "*" - } - }, - "acorn": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.2.0.tgz", - "integrity": "sha512-apwXVmYVpQ34m/i71vrApRrRKCWQnZZF1+npOD0WV5xZFfwWOmKGQ2RWlfdy9vWITsenisM8M0Qeq8agcFHNiQ==", - "dev": true - }, - "acorn-jsx": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.2.0.tgz", - "integrity": "sha512-HiUX/+K2YpkpJ+SzBffkM/AQ2YE03S0U1kjTLVpoJdhZMOWy8qvXVN9JdLqv2QsaQ6MPYQIuNmwD8zOiYUofLQ==", - "dev": true - }, - "aggregate-error": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-1.0.0.tgz", - "integrity": "sha1-iINE2tAiCnLjr1CQYRf0h3GSX6w=", - "requires": { - "clean-stack": "^1.0.0", - "indent-string": "^3.0.0" - } - }, - "ajv": { - "version": "6.12.2", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.2.tgz", - "integrity": "sha512-k+V+hzjm5q/Mr8ef/1Y9goCmlsK4I6Sm74teeyGvFk1XrOsbsKLjEdrvny42CZ+a8sXbk8KWpY/bDwS+FLL2UQ==", - "requires": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "ansi-escapes": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.1.tgz", - "integrity": "sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA==", - "dev": true, - "requires": { - "type-fest": "^0.11.0" - } - }, - "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" - }, - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "ansicolors": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/ansicolors/-/ansicolors-0.3.2.tgz", - "integrity": "sha1-ZlWX3oap/+Oqm/vmyuXG6kJrSXk=", - "dev": true - }, - "append-transform": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/append-transform/-/append-transform-1.0.0.tgz", - "integrity": "sha512-P009oYkeHyU742iSZJzZZywj4QRJdnTWffaKuJQLablCZ1uz6/cW4yaRgcDaoQ+uwOxxnt0gRUcwfsNP2ri0gw==", - "dev": true, - "requires": { - "default-require-extensions": "^2.0.0" - } - }, - "archy": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz", - "integrity": "sha1-+cjBN1fMHde8N5rHeyxipcKGjEA=", - "dev": true - }, - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "requires": { - "sprintf-js": "~1.0.2" - } - }, - "array-includes": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.1.tgz", - "integrity": "sha512-c2VXaCHl7zPsvpkFsw4nxvFie4fh1ur9bpcgsVkIjqn0H/Xwdg+7fv3n2r/isyS8EBj5b06M9kHyZuIr4El6WQ==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.0", - "is-string": "^1.0.5" - } - }, - "array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - 
"integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true - }, - "array.prototype.flat": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.3.tgz", - "integrity": "sha512-gBlRZV0VSmfPIeWfuuy56XZMvbVfbEUnOXUvt3F/eUUUSyzlgLxhEX4YAEpxNAogRGehPSnfXyPtYyKAhkzQhQ==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.0-next.1" - } - }, - "asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", - "requires": { - "safer-buffer": "~2.1.0" - } - }, - "assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=" - }, - "assertion-error": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", - "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", - "dev": true - }, - "astral-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", - "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==", - "dev": true - }, - "async": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", - "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", - "requires": { - "lodash": "^4.17.14" - } - }, - "async-limiter": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", - "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==" - }, - "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" - }, - "aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" - }, - "aws4": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.9.1.tgz", - "integrity": "sha512-wMHVg2EOHaMRxbzgFJ9gtjOOCrI80OHLG14rxi28XwOW8ux6IiEbRCGGGqCtdAIg4FQCbW20k9RsT4y3gJlFug==" - }, - "balanced-match": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", - "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" - }, - "base-x": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/base-x/-/base-x-3.0.5.tgz", - "integrity": "sha512-C3picSgzPSLE+jW3tcBzJoGwitOtazb5B+5YmAxZm2ybmTi9LNgAtDO/jjVEBZwHoXmDBZ9m/IELj3elJVRBcA==", - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "base64-js": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz", - "integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==" - }, - "base64url": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/base64url/-/base64url-3.0.1.tgz", - "integrity": "sha512-ir1UPr3dkwexU7FdV8qBBbNDRUhMmIekYMFZfi+C/sLNnRESKPl23nB9b2pltqfOQNnGzsDdId90AEtG5tCx4A==" - }, - "bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", - "requires": { - "tweetnacl": "^0.14.3" - } - }, - "bindings": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", - "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", - "requires": { - "file-uri-to-path": "1.0.0" - } - }, - "bip39": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/bip39/-/bip39-2.6.0.tgz", - "integrity": "sha512-RrnQRG2EgEoqO24ea+Q/fftuPUZLmrEM3qNhhGsA3PbaXaCW791LTzPuVyx/VprXQcTbPJ3K3UeTna8ZnVl2sg==", - "requires": { - "create-hash": "^1.1.0", - "pbkdf2": "^3.0.9", - "randombytes": "^2.0.1", - "safe-buffer": "^5.0.1", - "unorm": "^1.3.3" - } - }, - "bip66": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/bip66/-/bip66-1.1.5.tgz", - "integrity": "sha1-AfqHSHhcpwlV1QESF9GzE5lpyiI=", - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "bl": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/bl/-/bl-4.0.3.tgz", - "integrity": "sha512-fs4G6/Hu4/EE+F75J8DuN/0IpQqNjAdC7aEQv7Qt8MHGUH7Ckv2MwTEEeN9QehD0pfIDkMI1bkHYkKy7xHyKIg==", - "dev": true, - "requires": { - "buffer": "^5.5.0", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - }, - "dependencies": { - "readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dev": true, - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - } - } - }, - "blakejs": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/blakejs/-/blakejs-1.1.0.tgz", - "integrity": "sha1-ad+S75U6qIylGjLfarHFShVfx6U=" - }, - "bn.js": { - "version": "4.11.9", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", - "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" - }, - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, - "requires": { - "fill-range": "^7.0.1" - } - }, - "brorand": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", - "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=" - }, - "browserify-aes": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", - "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", - "requires": { - "buffer-xor": "^1.0.3", - "cipher-base": "^1.0.0", - "create-hash": "^1.1.0", - "evp_bytestokey": "^1.0.3", - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "browserify-zlib": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", - "integrity": 
"sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", - "requires": { - "pako": "~1.0.5" - } - }, - "bs58": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/bs58/-/bs58-4.0.1.tgz", - "integrity": "sha1-vhYedsNU9veIrkBx9j806MTwpCo=", - "requires": { - "base-x": "^3.0.2" - } - }, - "buffer": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.6.0.tgz", - "integrity": "sha512-/gDYp/UtU0eA1ys8bOs9J6a+E/KWIY+DZ+Q2WESNUA0jFRsJOc0SNUO6xJ5SGA1xueg3NL65W6s+NY5l9cunuw==", - "requires": { - "base64-js": "^1.0.2", - "ieee754": "^1.1.4" - } - }, - "buffer-xor": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", - "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=" - }, - "byline": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/byline/-/byline-5.0.0.tgz", - "integrity": "sha1-dBxSFkaOrcRXsDQQEYrXfejB3bE=" - }, - "cacheable-request": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-2.1.4.tgz", - "integrity": "sha1-DYCIAbY0KtM8kd+dC0TcCbkeXD0=", - "requires": { - "clone-response": "1.0.2", - "get-stream": "3.0.0", - "http-cache-semantics": "3.8.1", - "keyv": "3.0.0", - "lowercase-keys": "1.0.0", - "normalize-url": "2.0.1", - "responselike": "1.0.2" - }, - "dependencies": { - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=" - }, - "lowercase-keys": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.0.tgz", - "integrity": "sha1-TjNms55/VFfjXxMkvfb4jQv8cwY=" - } - } - }, - "caching-transform": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/caching-transform/-/caching-transform-3.0.2.tgz", - "integrity": "sha512-Mtgcv3lh3U0zRii/6qVgQODdPA4G3zhG+jtbCWj39RXuUFTMzH0vcdMtaJS1jPowd+It2Pqr6y3NJMQqOqCE2w==", - "dev": true, - "requires": { - "hasha": "^3.0.0", - "make-dir": "^2.0.0", - "package-hash": "^3.0.0", - "write-file-atomic": "^2.4.2" - }, - "dependencies": { - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - }, - "write-file-atomic": { - "version": "2.4.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-2.4.3.tgz", - "integrity": "sha512-GaETH5wwsX+GcnzhPgKcKjJ6M2Cq3/iZp1WyY/X1CSqrW+jVNM9Y7D8EC2sM4ZG/V8wZlSniJnCKWPmBYAucRQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.11", - "imurmurhash": "^0.1.4", - "signal-exit": "^3.0.2" - } - } - } - }, - "callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true - }, - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": 
"sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==" - }, - "cardinal": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/cardinal/-/cardinal-2.1.1.tgz", - "integrity": "sha1-fMEFXYItISlU0HsIXeolHMe8VQU=", - "dev": true, - "requires": { - "ansicolors": "~0.3.2", - "redeyed": "~2.1.0" - } - }, - "caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" - }, - "chai": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.2.0.tgz", - "integrity": "sha512-XQU3bhBukrOsQCuwZndwGcCVQHyZi53fQ6Ys1Fym7E4olpIqqZZhhoFJoaKVvV17lWQoXYwgWN2nF5crA8J2jw==", - "dev": true, - "requires": { - "assertion-error": "^1.1.0", - "check-error": "^1.0.2", - "deep-eql": "^3.0.1", - "get-func-name": "^2.0.0", - "pathval": "^1.1.0", - "type-detect": "^4.0.5" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "chardet": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", - "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", - "dev": true - }, - "check-error": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", - "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=", - "dev": true - }, - "chownr": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", - "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", - "dev": true - }, - "cipher-base": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", - "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", - "requires": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "clean-regexp": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/clean-regexp/-/clean-regexp-1.0.0.tgz", - "integrity": "sha1-jffHquUf02h06PjQW5GAvBGj/tc=", - "dev": true, - "requires": { - "escape-string-regexp": "^1.0.5" - } - }, - "clean-stack": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-1.3.0.tgz", - "integrity": "sha1-noIVAa6XmYbEax1m0tQy2y/UrjE=" - }, - "cli-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", - "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", - "dev": true, - "requires": { - "restore-cursor": "^3.1.0" - } - }, - "cli-progress": { - "version": "3.8.2", - "resolved": "https://registry.npmjs.org/cli-progress/-/cli-progress-3.8.2.tgz", - "integrity": "sha512-qRwBxLldMSfxB+YGFgNRaj5vyyHe1yMpVeDL79c+7puGujdKJHQHydgqXDcrkvQgJ5U/d3lpf6vffSoVVUftVQ==", - "dev": true, - "requires": { - "colors": "^1.1.2", - "string-width": "^4.2.0" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": 
"sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true - }, - "string-width": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", - "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", - "dev": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - } - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - } - } - }, - "cli-ux": { - "version": "5.4.6", - "resolved": "https://registry.npmjs.org/cli-ux/-/cli-ux-5.4.6.tgz", - "integrity": "sha512-EeiS2TzEndRVknCqE+8Ri8g0bsP617a1nq6n+3Trwft1JCDzyUNlX2J1fl7fwTgRPWtmBmiF6xIyueL5YGs65g==", - "dev": true, - "requires": { - "@oclif/command": "^1.6.0", - "@oclif/errors": "^1.2.1", - "@oclif/linewrap": "^1.0.0", - "@oclif/screen": "^1.0.3", - "ansi-escapes": "^4.3.0", - "ansi-styles": "^4.2.0", - "cardinal": "^2.1.1", - "chalk": "^2.4.1", - "clean-stack": "^2.0.0", - "cli-progress": "^3.4.0", - "extract-stack": "^1.0.0", - "fs-extra": "^7.0.1", - "hyperlinker": "^1.0.0", - "indent-string": "^4.0.0", - "is-wsl": "^1.1.0", - "js-yaml": "^3.13.1", - "lodash": "^4.17.11", - "natural-orderby": "^2.0.1", - "object-treeify": "^1.1.4", - "password-prompt": "^1.1.2", - "semver": "^5.6.0", - "string-width": "^3.1.0", - "strip-ansi": "^5.1.0", - "supports-color": "^5.5.0", - "supports-hyperlinks": "^1.0.1", - "tslib": "^1.9.3" - }, - "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "dev": true - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "indent-string": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "dev": true - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - } - } - }, - "cli-width": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-2.2.1.tgz", - "integrity": "sha512-GRMWDxpOB6Dgk2E5Uo+3eEBvtOOlimMmpbFiKuLFnQzYDavtLFY3K5ona41jgN/WdRZtG7utuVSVTL4HbZHGkw==", - "dev": true - }, - "cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "dev": true, - "requires": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - }, - "dependencies": { - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - }, - "wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" - } - } - } - }, - "clone-response": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", - "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", - "requires": { - "mimic-response": "^1.0.0" - } - }, - "color": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", - "integrity": "sha512-jCpd5+s0s0t7p3pHQKpnJ0TpQKKdleP71LWcA0aqiljpiuAkOSUFN/dyH8ZwF0hRmFlrIuRhufds1QyEP9EB+w==", - "requires": { - "color-convert": "^1.9.1", - "color-string": "^1.5.2" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "color-string": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.3.tgz", - "integrity": "sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==", - "requires": { - "color-name": "^1.0.0", - "simple-swizzle": "^0.2.2" - } - }, - "colornames": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/colornames/-/colornames-1.1.1.tgz", - "integrity": "sha1-+IiQMGhcfE/54qVZ9Qd+t2qBb5Y=" - }, - "colors": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", - "integrity": 
"sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==" - }, - "colorspace": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/colorspace/-/colorspace-1.1.2.tgz", - "integrity": "sha512-vt+OoIP2d76xLhjwbBaucYlNSpPsrJWPlBTtwCpQKIu6/CSMutyzX93O/Do0qzpH3YoHEes8YEFXyZ797rEhzQ==", - "requires": { - "color": "3.0.x", - "text-hex": "1.0.x" - } - }, - "combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "requires": { - "delayed-stream": "~1.0.0" - } - }, - "commondir": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", - "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=", - "dev": true - }, - "concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" - }, - "contains-path": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/contains-path/-/contains-path-0.1.0.tgz", - "integrity": "sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo=", - "dev": true - }, - "content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", - "dev": true - }, - "convert-source-map": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", - "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", - "dev": true, - "requires": { - "safe-buffer": "~5.1.1" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - } - } - }, - "core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" - }, - "cp-file": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/cp-file/-/cp-file-6.2.0.tgz", - "integrity": "sha512-fmvV4caBnofhPe8kOcitBwSn2f39QLjnAnGq3gO9dfd75mUytzKNZB1hde6QHunW2Rt+OwuBOMc3i1tNElbszA==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "make-dir": "^2.0.0", - "nested-error-stacks": "^2.0.0", - "pify": "^4.0.1", - "safe-buffer": "^5.0.1" - }, - "dependencies": { - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - } - } - }, - "create-hash": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", - "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", - "requires": { - "cipher-base": "^1.0.1", - "inherits": 
"^2.0.1", - "md5.js": "^1.3.4", - "ripemd160": "^2.0.1", - "sha.js": "^2.4.0" - } - }, - "create-hmac": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", - "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", - "requires": { - "cipher-base": "^1.0.3", - "create-hash": "^1.1.0", - "inherits": "^2.0.1", - "ripemd160": "^2.0.0", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - } - }, - "cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", - "requires": { - "nice-try": "^1.0.4", - "path-key": "^2.0.1", - "semver": "^5.5.0", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - } - }, - "cuint": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/cuint/-/cuint-0.2.2.tgz", - "integrity": "sha1-QICG1AlVDCYxFVYZ6fp7ytw7mRs=" - }, - "d": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", - "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", - "requires": { - "es5-ext": "^0.10.50", - "type": "^1.0.1" - } - }, - "dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", - "requires": { - "assert-plus": "^1.0.0" - } - }, - "debug": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", - "requires": { - "ms": "^2.1.1" - } - }, - "decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "dev": true - }, - "decode-uri-component": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", - "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=" - }, - "decompress-response": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", - "requires": { - "mimic-response": "^1.0.0" - } - }, - "deep-eql": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", - "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", - "dev": true, - "requires": { - "type-detect": "^4.0.0" - } - }, - "deep-is": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", - "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", - "dev": true - }, - "default-require-extensions": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/default-require-extensions/-/default-require-extensions-2.0.0.tgz", - "integrity": "sha1-9fj7sYp9bVCyH2QfZJ67Uiz+JPc=", - "dev": true, - "requires": { - "strip-bom": "^3.0.0" - }, - "dependencies": { - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true - } - } - }, - "define-properties": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", - "integrity": 
"sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", - "dev": true, - "requires": { - "object-keys": "^1.0.12" - } - }, - "delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" - }, - "detect-indent": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-6.0.0.tgz", - "integrity": "sha512-oSyFlqaTHCItVRGK5RmrmjB+CmaMOW7IaNA/kdxqhoa6d17j/5ce9O9eWXmV/KEdRwqpQA+Vqe8a8Bsybu4YnA==", - "dev": true - }, - "diagnostics": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/diagnostics/-/diagnostics-1.1.1.tgz", - "integrity": "sha512-8wn1PmdunLJ9Tqbx+Fx/ZEuHfJf4NKSN2ZBj7SJC/OWRWha843+WsTjqMe1B5E3p28jqBlp+mJ2fPVxPyNgYKQ==", - "requires": { - "colorspace": "1.1.x", - "enabled": "1.0.x", - "kuler": "1.0.x" - } - }, - "dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "requires": { - "path-type": "^4.0.0" - } - }, - "doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", - "dev": true, - "requires": { - "esutils": "^2.0.2" - } - }, - "drbg.js": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/drbg.js/-/drbg.js-1.0.1.tgz", - "integrity": "sha1-Pja2xCs3BDgjzbwzLVjzHiRFSAs=", - "requires": { - "browserify-aes": "^1.0.6", - "create-hash": "^1.1.2", - "create-hmac": "^1.1.4" - } - }, - "duplexer3": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", - "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=" - }, - "ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", - "requires": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "elliptic": { - "version": "6.5.3", - "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz", - "integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==", - "requires": { - "bn.js": "^4.4.0", - "brorand": "^1.0.1", - "hash.js": "^1.0.0", - "hmac-drbg": "^1.0.0", - "inherits": "^2.0.1", - "minimalistic-assert": "^1.0.0", - "minimalistic-crypto-utils": "^1.0.0" - } - }, - "emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" - }, - "enabled": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/enabled/-/enabled-1.0.2.tgz", - "integrity": "sha1-ll9lE9LC0cX0ZStkouM5ZGf8L5M=", - "requires": { - "env-variable": "0.0.x" - } - }, - "encoding": { - "version": "0.1.12", - "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.12.tgz", - "integrity": "sha1-U4tm8+5izRq1HsMjgp0flIDHS+s=", - "requires": { - "iconv-lite": "~0.4.13" - } - }, - "end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - 
"requires": { - "once": "^1.4.0" - } - }, - "env-variable": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/env-variable/-/env-variable-0.0.6.tgz", - "integrity": "sha512-bHz59NlBbtS0NhftmR8+ExBEekE7br0e01jw+kk0NDro7TtZzBYZ5ScGPs3OmwnpyfHTHOtr1Y6uedCdrIldtg==" - }, - "error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dev": true, - "requires": { - "is-arrayish": "^0.2.1" - } - }, - "es-abstract": { - "version": "1.17.5", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.5.tgz", - "integrity": "sha512-BR9auzDbySxOcfog0tLECW8l28eRGpDpU3Dm3Hp4q/N+VtLTmyj4EUN088XZWQDW/hzj6sYRDXeOFsaAODKvpg==", - "dev": true, - "requires": { - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1", - "is-callable": "^1.1.5", - "is-regex": "^1.0.5", - "object-inspect": "^1.7.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.0", - "string.prototype.trimleft": "^2.1.1", - "string.prototype.trimright": "^2.1.1" - } - }, - "es-to-primitive": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", - "dev": true, - "requires": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - } - }, - "es5-ext": { - "version": "0.10.53", - "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", - "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", - "requires": { - "es6-iterator": "~2.0.3", - "es6-symbol": "~3.1.3", - "next-tick": "~1.0.0" - }, - "dependencies": { - "next-tick": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", - "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" - } - } - }, - "es6-error": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz", - "integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==", - "dev": true - }, - "es6-iterator": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", - "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", - "requires": { - "d": "1", - "es5-ext": "^0.10.35", - "es6-symbol": "^3.1.1" - } - }, - "es6-promise": { - "version": "4.2.8", - "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", - "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" - }, - "es6-symbol": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz", - "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==", - "requires": { - "d": "^1.0.1", - "ext": "^1.1.2" - } - }, - "es6-weak-map": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/es6-weak-map/-/es6-weak-map-2.0.3.tgz", - "integrity": "sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==", - "requires": { - "d": "1", - "es5-ext": "^0.10.46", - "es6-iterator": "^2.0.3", - "es6-symbol": "^3.1.1" - } - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": 
"https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" - }, - "eslint": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.1.0.tgz", - "integrity": "sha512-DfS3b8iHMK5z/YLSme8K5cge168I8j8o1uiVmFCgnnjxZQbCGyraF8bMl7Ju4yfBmCuxD7shOF7eqGkcuIHfsA==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.0.0", - "ajv": "^6.10.0", - "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", - "debug": "^4.0.1", - "doctrine": "^3.0.0", - "eslint-scope": "^5.0.0", - "eslint-utils": "^2.0.0", - "eslint-visitor-keys": "^1.1.0", - "espree": "^7.0.0", - "esquery": "^1.2.0", - "esutils": "^2.0.2", - "file-entry-cache": "^5.0.1", - "functional-red-black-tree": "^1.0.1", - "glob-parent": "^5.0.0", - "globals": "^12.1.0", - "ignore": "^4.0.6", - "import-fresh": "^3.0.0", - "imurmurhash": "^0.1.4", - "inquirer": "^7.0.0", - "is-glob": "^4.0.0", - "js-yaml": "^3.13.1", - "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", - "lodash": "^4.17.14", - "minimatch": "^3.0.4", - "natural-compare": "^1.4.0", - "optionator": "^0.9.1", - "progress": "^2.0.0", - "regexpp": "^3.1.0", - "semver": "^7.2.1", - "strip-ansi": "^6.0.0", - "strip-json-comments": "^3.1.0", - "table": "^5.2.3", - "text-table": "^0.2.0", - "v8-compile-cache": "^2.0.3" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true - }, - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.0.0.tgz", - "integrity": "sha512-N9oWFcegS0sFr9oh1oz2d7Npos6vNoWW9HvtCg5N1KRFpUhaAhvTv5Y58g880fZaEYSNm3qDz8SU1UrGvp+n7A==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "requires": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - } - }, - "globals": { - "version": "12.4.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", - "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", - "dev": true, - "requires": { - "type-fest": "^0.8.1" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "ignore": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", - "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", - "dev": true - }, - "path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true - }, - "semver": { - "version": "7.3.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.2.tgz", - "integrity": "sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ==", - "dev": true - }, - "shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "requires": { - "shebang-regex": "^3.0.0" - } - }, - "shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - }, - "supports-color": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", - "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - }, - "type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true - }, - "which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - } - } - }, - "eslint-ast-utils": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eslint-ast-utils/-/eslint-ast-utils-1.1.0.tgz", - "integrity": "sha512-otzzTim2/1+lVrlH19EfQQJEhVJSu0zOb9ygb3iapN6UlyaDtyRq4b5U1FuW0v1lRa9Fp/GJyHkSwm6NqABgCA==", - "dev": true, - "requires": { - "lodash.get": "^4.4.2", - "lodash.zip": "^4.2.0" - } - }, - "eslint-config-oclif": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/eslint-config-oclif/-/eslint-config-oclif-3.1.0.tgz", - "integrity": "sha512-Tqgy43cNXsSdhTLWW4RuDYGFhV240sC4ISSv/ZiUEg/zFxExSEUpRE6J+AGnkKY9dYwIW4C9b2YSUVv8z/miMA==", - "dev": true, - "requires": { - "eslint-config-xo-space": "^0.20.0", - "eslint-plugin-mocha": "^5.2.0", - "eslint-plugin-node": "^7.0.1", - "eslint-plugin-unicorn": "^6.0.1" - }, - "dependencies": { - "eslint-plugin-es": { - "version": "1.4.1", - "resolved": 
"https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-1.4.1.tgz", - "integrity": "sha512-5fa/gR2yR3NxQf+UXkeLeP8FBBl6tSgdrAz1+cF84v1FMM4twGwQoqTnn+QxFLcPOrF4pdKEJKDB/q9GoyJrCA==", - "dev": true, - "requires": { - "eslint-utils": "^1.4.2", - "regexpp": "^2.0.1" - } - }, - "eslint-plugin-node": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-7.0.1.tgz", - "integrity": "sha512-lfVw3TEqThwq0j2Ba/Ckn2ABdwmL5dkOgAux1rvOk6CO7A6yGyPI2+zIxN6FyNkp1X1X/BSvKOceD6mBWSj4Yw==", - "dev": true, - "requires": { - "eslint-plugin-es": "^1.3.1", - "eslint-utils": "^1.3.1", - "ignore": "^4.0.2", - "minimatch": "^3.0.4", - "resolve": "^1.8.1", - "semver": "^5.5.0" - } - }, - "eslint-plugin-unicorn": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-unicorn/-/eslint-plugin-unicorn-6.0.1.tgz", - "integrity": "sha512-hjy9LhTdtL7pz8WTrzS0CGXRkWK3VAPLDjihofj8JC+uxQLfXm0WwZPPPB7xKmcjRyoH+jruPHOCrHNEINpG/Q==", - "dev": true, - "requires": { - "clean-regexp": "^1.0.0", - "eslint-ast-utils": "^1.0.0", - "import-modules": "^1.1.0", - "lodash.camelcase": "^4.1.1", - "lodash.kebabcase": "^4.0.1", - "lodash.snakecase": "^4.0.1", - "lodash.upperfirst": "^4.2.0", - "safe-regex": "^1.1.0" - } - }, - "eslint-utils": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz", - "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==", - "dev": true, - "requires": { - "eslint-visitor-keys": "^1.1.0" - } - }, - "ignore": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", - "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", - "dev": true - }, - "regexpp": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", - "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==", - "dev": true - } - } - }, - "eslint-config-standard": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-14.1.1.tgz", - "integrity": "sha512-Z9B+VR+JIXRxz21udPTL9HpFMyoMUEeX1G251EQ6e05WD9aPVtVBn09XUmZ259wCMlCDmYDSZG62Hhm+ZTJcUg==", - "dev": true - }, - "eslint-config-xo": { - "version": "0.24.2", - "resolved": "https://registry.npmjs.org/eslint-config-xo/-/eslint-config-xo-0.24.2.tgz", - "integrity": "sha512-ivQ7qISScW6gfBp+p31nQntz1rg34UCybd3uvlngcxt5Utsf4PMMi9QoAluLFcPUM5Tvqk4JGraR9qu3msKPKQ==", - "dev": true - }, - "eslint-config-xo-space": { - "version": "0.20.0", - "resolved": "https://registry.npmjs.org/eslint-config-xo-space/-/eslint-config-xo-space-0.20.0.tgz", - "integrity": "sha512-bOsoZA8M6v1HviDUIGVq1fLVnSu3mMZzn85m2tqKb73tSzu4GKD4Jd2Py4ZKjCgvCbRRByEB5HPC3fTMnnJ1uw==", - "dev": true, - "requires": { - "eslint-config-xo": "^0.24.0" - } - }, - "eslint-import-resolver-node": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.3.tgz", - "integrity": "sha512-b8crLDo0M5RSe5YG8Pu2DYBj71tSB6OvXkfzwbJU2w7y8P4/yo0MyF8jU26IEuEuHF2K5/gcAJE3LhQGqBBbVg==", - "dev": true, - "requires": { - "debug": "^2.6.9", - "resolve": "^1.13.1" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - } - } - }, - "eslint-module-utils": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.6.0.tgz", - "integrity": "sha512-6j9xxegbqe8/kZY8cYpcp0xhbK0EgJlg3g9mib3/miLaExuuwc3n5UEfSnU6hWMbT0FAYVvDbL9RrRgpUeQIvA==", - "dev": true, - "requires": { - "debug": "^2.6.9", - "pkg-dir": "^2.0.0" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", - "dev": true, - "requires": { - "locate-path": "^2.0.0" - } - }, - "locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", - "dev": true, - "requires": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - }, - "p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", - "dev": true, - "requires": { - "p-try": "^1.0.0" - } - }, - "p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", - "dev": true, - "requires": { - "p-limit": "^1.1.0" - } - }, - "p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", - "dev": true - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "pkg-dir": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz", - "integrity": "sha1-9tXREJ4Z1j7fQo4L1X4Sd3YVM0s=", - "dev": true, - "requires": { - "find-up": "^2.1.0" - } - } - } - }, - "eslint-plugin-es": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz", - "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==", - "dev": true, - "requires": { - "eslint-utils": "^2.0.0", - "regexpp": "^3.0.0" - } - }, - "eslint-plugin-import": { - "version": "2.20.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.20.2.tgz", - "integrity": "sha512-FObidqpXrR8OnCh4iNsxy+WACztJLXAHBO5hK79T1Hc77PgQZkyDGA5Ag9xAvRpglvLNxhH/zSmZ70/pZ31dHg==", - "dev": true, - "requires": { - "array-includes": "^3.0.3", - "array.prototype.flat": "^1.2.1", - "contains-path": "^0.1.0", - "debug": "^2.6.9", - "doctrine": "1.5.0", - "eslint-import-resolver-node": "^0.3.2", - 
"eslint-module-utils": "^2.4.1", - "has": "^1.0.3", - "minimatch": "^3.0.4", - "object.values": "^1.1.0", - "read-pkg-up": "^2.0.0", - "resolve": "^1.12.0" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "doctrine": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-1.5.0.tgz", - "integrity": "sha1-N53Ocw9hZvds76TmcHoVmwLFpvo=", - "dev": true, - "requires": { - "esutils": "^2.0.2", - "isarray": "^1.0.0" - } - }, - "find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", - "dev": true, - "requires": { - "locate-path": "^2.0.0" - } - }, - "load-json-file": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz", - "integrity": "sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "parse-json": "^2.2.0", - "pify": "^2.0.0", - "strip-bom": "^3.0.0" - } - }, - "locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", - "dev": true, - "requires": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - }, - "p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", - "dev": true, - "requires": { - "p-try": "^1.0.0" - } - }, - "p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", - "dev": true, - "requires": { - "p-limit": "^1.1.0" - } - }, - "p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", - "dev": true - }, - "parse-json": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", - "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", - "dev": true, - "requires": { - "error-ex": "^1.2.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "path-type": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", - "integrity": "sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=", - "dev": true, - "requires": { - "pify": "^2.0.0" - } - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true - }, - "read-pkg": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz", - "integrity": "sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=", - "dev": true, - "requires": { - "load-json-file": "^2.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^2.0.0" - } - }, - "read-pkg-up": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-2.0.0.tgz", - "integrity": "sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=", - "dev": true, - "requires": { - "find-up": "^2.0.0", - "read-pkg": "^2.0.0" - } - }, - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true - } - } - }, - "eslint-plugin-mocha": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-mocha/-/eslint-plugin-mocha-5.3.0.tgz", - "integrity": "sha512-3uwlJVLijjEmBeNyH60nzqgA1gacUWLUmcKV8PIGNvj1kwP/CTgAWQHn2ayyJVwziX+KETkr9opNwT1qD/RZ5A==", - "dev": true, - "requires": { - "ramda": "^0.26.1" - } - }, - "eslint-plugin-node": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz", - "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==", - "dev": true, - "requires": { - "eslint-plugin-es": "^3.0.0", - "eslint-utils": "^2.0.0", - "ignore": "^5.1.1", - "minimatch": "^3.0.4", - "resolve": "^1.10.1", - "semver": "^6.1.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true - } - } - }, - "eslint-plugin-promise": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-promise/-/eslint-plugin-promise-4.2.1.tgz", - "integrity": "sha512-VoM09vT7bfA7D+upt+FjeBO5eHIJQBUWki1aPvB+vbNiHS3+oGIJGIeyBtKQTME6UPXXy3vV07OL1tHd3ANuDw==", - "dev": true - }, - "eslint-plugin-standard": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-standard/-/eslint-plugin-standard-4.0.1.tgz", - "integrity": "sha512-v/KBnfyaOMPmZc/dmc6ozOdWqekGp7bBGq4jLAecEfPGmfKiWS4sA8sC0LqiV9w5qmXAtXVn4M3p1jSyhY85SQ==", - "dev": true - }, - "eslint-scope": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.0.0.tgz", - "integrity": "sha512-oYrhJW7S0bxAFDvWqzvMPRm6pcgcnWc4QnofCAqRTRfQC0JcwenzGglTtsLyIuuWFfkqDG9vz67cnttSd53djw==", - "dev": true, - "requires": { - "esrecurse": "^4.1.0", - "estraverse": "^4.1.1" - } - }, - "eslint-utils": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.0.0.tgz", - "integrity": "sha512-0HCPuJv+7Wv1bACm8y5/ECVfYdfsAm9xmVb7saeFlxjPYALefjhbYoCkBjPdPzGH8wWyTpAez82Fh3VKYEZ8OA==", - "dev": true, - "requires": { - "eslint-visitor-keys": "^1.1.0" - } - }, - "eslint-visitor-keys": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz", - "integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==", - "dev": true - }, - "espree": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/espree/-/espree-7.0.0.tgz", - "integrity": "sha512-/r2XEx5Mw4pgKdyb7GNLQNsu++asx/dltf/CI8RFi9oGHxmQFgvLbc5Op4U6i8Oaj+kdslhJtVlEZeAqH5qOTw==", - "dev": true, - "requires": { - "acorn": "^7.1.1", - "acorn-jsx": "^5.2.0", - "eslint-visitor-keys": "^1.1.0" - } - }, - "esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" - }, - "esquery": { - "version": "1.3.1", - "resolved": 
"https://registry.npmjs.org/esquery/-/esquery-1.3.1.tgz", - "integrity": "sha512-olpvt9QG0vniUBZspVRN6lwB7hOZoTRtT+jzR+tS4ffYx2mzbw+z0XCOk44aaLYKApNX5nMm+E+P6o25ip/DHQ==", - "dev": true, - "requires": { - "estraverse": "^5.1.0" - }, - "dependencies": { - "estraverse": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.1.0.tgz", - "integrity": "sha512-FyohXK+R0vE+y1nHLoBM7ZTyqRpqAlhdZHCWIWEviFLiGB8b04H6bQs8G+XTthacvT8VuwvteiP7RJSxMs8UEw==", - "dev": true - } - } - }, - "esrecurse": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz", - "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==", - "dev": true, - "requires": { - "estraverse": "^4.1.0" - } - }, - "estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true - }, - "esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true - }, - "event-emitter": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/event-emitter/-/event-emitter-0.3.5.tgz", - "integrity": "sha1-34xp7vFkeSPHFXuc6DhAYQsCzDk=", - "requires": { - "d": "1", - "es5-ext": "~0.10.14" - } - }, - "eventemitter3": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.4.tgz", - "integrity": "sha512-rlaVLnVxtxvoyLsQQFBx53YmXHDxRIzzTLbdfxqi4yocpSjAxXwkU0cScM5JgSKMqEhrZpnvQ2D9gjylR0AimQ==" - }, - "evp_bytestokey": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", - "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", - "requires": { - "md5.js": "^1.3.4", - "safe-buffer": "^5.1.1" - } - }, - "execa": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", - "requires": { - "cross-spawn": "^6.0.0", - "get-stream": "^4.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - } - }, - "ext": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz", - "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==", - "requires": { - "type": "^2.0.0" - }, - "dependencies": { - "type": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/type/-/type-2.0.0.tgz", - "integrity": "sha512-KBt58xCHry4Cejnc2ISQAF7QY+ORngsWfxezO68+12hKV6lQY8P/psIkcbjeHWn7MqcgciWJyCCevFMJdIXpow==" - } - } - }, - "extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "external-editor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", - "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", - "dev": true, - "requires": { - "chardet": "^0.7.0", - 
"iconv-lite": "^0.4.24", - "tmp": "^0.0.33" - }, - "dependencies": { - "tmp": { - "version": "0.0.33", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "dev": true, - "requires": { - "os-tmpdir": "~1.0.2" - } - } - } - }, - "extract-stack": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/extract-stack/-/extract-stack-1.0.0.tgz", - "integrity": "sha1-uXrK+UQe6iMyUpYktzL8WhyBZfo=", - "dev": true - }, - "extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" - }, - "fancy-test": { - "version": "1.4.8", - "resolved": "https://registry.npmjs.org/fancy-test/-/fancy-test-1.4.8.tgz", - "integrity": "sha512-/uCv78YSAz4UOQ3ZptnxOq6qYhJDMtwFHQnsghzGl2g6uO2VNfJDKlyczqFpG+KueXe7phoeIS6hMU1x/qhizQ==", - "dev": true, - "requires": { - "@types/chai": "*", - "@types/lodash": "*", - "@types/mocha": "*", - "@types/node": "*", - "@types/sinon": "*", - "lodash": "^4.17.13", - "mock-stdin": "^0.3.1", - "stdout-stderr": "^0.1.9" - } - }, - "fast-deep-equal": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz", - "integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA==" - }, - "fast-glob": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.2.tgz", - "integrity": "sha512-UDV82o4uQyljznxwMxyVRJgZZt3O5wENYojjzbaGEGZgeOxkLFf+V4cnUD+krzb2F72E18RhamkMZ7AdeggF7A==", - "dev": true, - "requires": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.0", - "merge2": "^1.3.0", - "micromatch": "^4.0.2", - "picomatch": "^2.2.1" - } - }, - "fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" - }, - "fast-levenshtein": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", - "dev": true - }, - "fast-safe-stringify": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz", - "integrity": "sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA==" - }, - "fastq": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.8.0.tgz", - "integrity": "sha512-SMIZoZdLh/fgofivvIkmknUXyPnvxRE3DhtZ5Me3Mrsk5gyPL42F0xr51TdRXskBxHfMp+07bcYzfsYEsSQA9Q==", - "dev": true, - "requires": { - "reusify": "^1.0.4" - } - }, - "fecha": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fecha/-/fecha-2.3.3.tgz", - "integrity": "sha512-lUGBnIamTAwk4znq5BcqsDaxSmZ9nDVJaij6NvRt/Tg4R69gERA+otPKbS86ROw9nxVMw2/mp1fnaiWqbs6Sdg==" - }, - "figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "dev": true, - "requires": { - "escape-string-regexp": "^1.0.5" - } - }, - "file-entry-cache": { - "version": "5.0.1", - "resolved": 
"https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz", - "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==", - "dev": true, - "requires": { - "flat-cache": "^2.0.1" - } - }, - "file-uri-to-path": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", - "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==" - }, - "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, - "requires": { - "to-regex-range": "^5.0.1" - } - }, - "find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "dev": true, - "requires": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - }, - "dependencies": { - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - }, - "pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dev": true, - "requires": { - "find-up": "^3.0.0" - } - } - } - }, - "find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dev": true, - "requires": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - } - }, - "flat-cache": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", - 
"integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==", - "dev": true, - "requires": { - "flatted": "^2.0.0", - "rimraf": "2.6.3", - "write": "1.0.3" - }, - "dependencies": { - "rimraf": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", - "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", - "dev": true, - "requires": { - "glob": "^7.1.3" - } - } - } - }, - "flatted": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==", - "dev": true - }, - "foreground-child": { - "version": "1.5.6", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-1.5.6.tgz", - "integrity": "sha1-T9ca0t/elnibmApcCilZN8svXOk=", - "dev": true, - "requires": { - "cross-spawn": "^4", - "signal-exit": "^3.0.0" - }, - "dependencies": { - "cross-spawn": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-4.0.2.tgz", - "integrity": "sha1-e5JHYhwjrf3ThWAEqCPL45dCTUE=", - "dev": true, - "requires": { - "lru-cache": "^4.0.1", - "which": "^1.2.9" - } - }, - "lru-cache": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - "dev": true, - "requires": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=", - "dev": true - } - } - }, - "forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=" - }, - "form-data": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.5.1.tgz", - "integrity": "sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA==", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - } - }, - "from2": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", - "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", - "requires": { - "inherits": "^2.0.1", - "readable-stream": "^2.0.0" - } - }, - "fs-constants": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", - "dev": true - }, - "fs-extra": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", - "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", - "requires": { - "graceful-fs": "^4.1.2", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - } - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": 
"sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true - }, - "functional-red-black-tree": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", - "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", - "dev": true - }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true - }, - "get-func-name": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz", - "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=", - "dev": true - }, - "get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "requires": { - "pump": "^3.0.0" - } - }, - "getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", - "requires": { - "assert-plus": "^1.0.0" - } - }, - "github-slugger": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.3.0.tgz", - "integrity": "sha512-gwJScWVNhFYSRDvURk/8yhcFBee6aFjye2a7Lhb2bUyRulpIoek9p0I9Kt7PT67d/nUlZbFu8L9RLiA0woQN8Q==", - "dev": true, - "requires": { - "emoji-regex": ">=6.0.0 <=6.1.1" - }, - "dependencies": { - "emoji-regex": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-6.1.1.tgz", - "integrity": "sha1-xs0OwbBkLio8Z6ETfvxeeW2k+I4=", - "dev": true - } - } - }, - "glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "glob-parent": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz", - "integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==", - "dev": true, - "requires": { - "is-glob": "^4.0.1" - } - }, - "globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "dev": true - }, - "globby": { - "version": "10.0.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-10.0.2.tgz", - "integrity": "sha512-7dUi7RvCoT/xast/o/dLN53oqND4yk0nsHkhRgn9w65C4PofCLOoJ39iSOg+qVDdWQPIEj+eszMHQ+aLVwwQSg==", - "dev": true, - "requires": { - "@types/glob": "^7.1.1", - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.0.3", - "glob": "^7.1.3", - "ignore": "^5.1.1", - "merge2": "^1.2.3", - "slash": "^3.0.0" - } - }, - "got": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/got/-/got-8.3.2.tgz", - "integrity": "sha512-qjUJ5U/hawxosMryILofZCkm3C84PLJS/0grRIpjAwu+Lkxxj5cxeCU25BG0/3mDSpXKTyZr8oh8wIgLaH0QCw==", - "requires": { - "@sindresorhus/is": "^0.7.0", - "cacheable-request": "^2.1.1", - "decompress-response": "^3.3.0", - 
"duplexer3": "^0.1.4", - "get-stream": "^3.0.0", - "into-stream": "^3.1.0", - "is-retry-allowed": "^1.1.0", - "isurl": "^1.0.0-alpha5", - "lowercase-keys": "^1.0.0", - "mimic-response": "^1.0.0", - "p-cancelable": "^0.4.0", - "p-timeout": "^2.0.1", - "pify": "^3.0.0", - "safe-buffer": "^5.1.1", - "timed-out": "^4.0.1", - "url-parse-lax": "^3.0.0", - "url-to-options": "^1.0.1" - }, - "dependencies": { - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=" - } - } - }, - "graceful-fs": { - "version": "4.2.4", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz", - "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==" - }, - "har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" - }, - "har-validator": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.3.tgz", - "integrity": "sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g==", - "requires": { - "ajv": "^6.5.5", - "har-schema": "^2.0.0" - } - }, - "has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, - "requires": { - "function-bind": "^1.1.1" - } - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" - }, - "has-symbol-support-x": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz", - "integrity": "sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw==" - }, - "has-symbols": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.1.tgz", - "integrity": "sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg==", - "dev": true - }, - "has-to-string-tag-x": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz", - "integrity": "sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw==", - "requires": { - "has-symbol-support-x": "^1.4.1" - } - }, - "hash-base": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", - "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", - "requires": { - "inherits": "^2.0.4", - "readable-stream": "^3.6.0", - "safe-buffer": "^5.2.0" - }, - "dependencies": { - "readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - } - } - }, - "hash.js": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", - "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", - "requires": { - 
"inherits": "^2.0.3", - "minimalistic-assert": "^1.0.1" - } - }, - "hasha": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/hasha/-/hasha-3.0.0.tgz", - "integrity": "sha1-UqMvq4Vp1BymmmH/GiFPjrfIvTk=", - "dev": true, - "requires": { - "is-stream": "^1.0.1" - } - }, - "hmac-drbg": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", - "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=", - "requires": { - "hash.js": "^1.0.3", - "minimalistic-assert": "^1.0.0", - "minimalistic-crypto-utils": "^1.0.1" - } - }, - "hosted-git-info": { - "version": "2.8.8", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.8.tgz", - "integrity": "sha512-f/wzC2QaWBs7t9IYqB4T3sR1xviIViXJRJTWBlx2Gf3g0Xi5vI7Yy4koXQ1c9OYDGHN9sBy1DQ2AB8fqZBWhUg==", - "dev": true - }, - "html-escaper": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", - "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", - "dev": true - }, - "http-cache-semantics": { - "version": "3.8.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz", - "integrity": "sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w==" - }, - "http-call": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/http-call/-/http-call-5.3.0.tgz", - "integrity": "sha512-ahwimsC23ICE4kPl9xTBjKB4inbRaeLyZeRunC/1Jy/Z6X8tv22MEAjK+KBOMSVLaqXPTTmd8638waVIKLGx2w==", - "dev": true, - "requires": { - "content-type": "^1.0.4", - "debug": "^4.1.1", - "is-retry-allowed": "^1.1.0", - "is-stream": "^2.0.0", - "parse-json": "^4.0.0", - "tunnel-agent": "^0.6.0" - }, - "dependencies": { - "is-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", - "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", - "dev": true - } - } - }, - "http-signature": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", - "requires": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - } - }, - "hyperlinker": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/hyperlinker/-/hyperlinker-1.0.0.tgz", - "integrity": "sha512-Ty8UblRWFEcfSuIaajM34LdPXIhbs1ajEX/BBPv24J+enSVaEVY63xQ6lTO9VRYS5LAoghIG0IDJ+p+IPzKUQQ==", - "dev": true - }, - "iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "requires": { - "safer-buffer": ">= 2.1.2 < 3" - } - }, - "ieee754": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", - "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" - }, - "ignore": { - "version": "5.1.4", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.4.tgz", - "integrity": "sha512-MzbUSahkTW1u7JpKKjY7LCARd1fU5W2rLdxlM4kdkayuCwZImjkpluF9CM1aLewYJguPDqewLam18Y6AU69A8A==", - "dev": true - }, - "import-fresh": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.1.tgz", - "integrity": 
"sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ==", - "dev": true, - "requires": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - } - }, - "import-modules": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/import-modules/-/import-modules-1.1.0.tgz", - "integrity": "sha1-dI23nFzEK7lwHvq0JPiU5yYA6dw=", - "dev": true - }, - "imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", - "dev": true - }, - "indent-string": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-3.2.0.tgz", - "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok=" - }, - "inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "inquirer": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-7.1.0.tgz", - "integrity": "sha512-5fJMWEmikSYu0nv/flMc475MhGbB7TSPd/2IpFV4I4rMklboCH2rQjYY5kKiYGHqUF9gvaambupcJFFG9dvReg==", - "dev": true, - "requires": { - "ansi-escapes": "^4.2.1", - "chalk": "^3.0.0", - "cli-cursor": "^3.1.0", - "cli-width": "^2.0.0", - "external-editor": "^3.0.3", - "figures": "^3.0.0", - "lodash": "^4.17.15", - "mute-stream": "0.0.8", - "run-async": "^2.4.0", - "rxjs": "^6.5.3", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0", - "through": "^2.3.6" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true - }, - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - 
"has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true - }, - "string-width": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", - "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", - "dev": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - } - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - }, - "supports-color": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", - "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "interpret": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.2.0.tgz", - "integrity": "sha512-mT34yGKMNceBQUoVn7iCDKDntA7SC6gycMAWzGx1z/CMCTV7b2AAtXlo3nRyHZ1FelRkQbQjprHSYGwzLtkVbw==" - }, - "into-stream": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-3.1.0.tgz", - "integrity": "sha1-lvsKk2wSur1v8XUqF9BWFqvQlMY=", - "requires": { - "from2": "^2.1.1", - "p-is-promise": "^1.1.0" - } - }, - "ip-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-4.1.0.tgz", - "integrity": "sha512-pKnZpbgCTfH/1NLIlOduP/V+WRXzC2MOz3Qo8xmxk8C5GudJLgK5QyLVXOSWy3ParAH7Eemurl3xjv/WXYFvMA==" - }, - "is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", - "dev": true - }, - "is-callable": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.5.tgz", - "integrity": "sha512-ESKv5sMCJB2jnHTWZ3O5itG+O128Hsus4K4Qh1h2/cgn2vbgnLSVqfV46AeJA9D5EeeLa9w81KUXMtn34zhX+Q==", - "dev": true - }, - "is-date-object": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", - "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==", - "dev": true - }, - "is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=" - }, - "is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": 
"sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", - "dev": true, - "requires": { - "is-extglob": "^2.1.1" - } - }, - "is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true - }, - "is-object": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-object/-/is-object-1.0.1.tgz", - "integrity": "sha1-iVJojF7C/9awPsyF52ngKQMINHA=" - }, - "is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=" - }, - "is-promise": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz", - "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==" - }, - "is-regex": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.5.tgz", - "integrity": "sha512-vlKW17SNq44owv5AQR3Cq0bQPEb8+kF3UKZ2fiZNOWtztYE5i0CzCZxFDwO58qAOWtxdBRVO/V5Qin1wjCqFYQ==", - "dev": true, - "requires": { - "has": "^1.0.3" - } - }, - "is-retry-allowed": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz", - "integrity": "sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg==" - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=" - }, - "is-string": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz", - "integrity": "sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ==", - "dev": true - }, - "is-symbol": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", - "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", - "dev": true, - "requires": { - "has-symbols": "^1.0.1" - } - }, - "is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" - }, - "is-wsl": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz", - "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=", - "dev": true - }, - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" - }, - "isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" - }, - "isomorphic-fetch": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz", - "integrity": "sha1-YRrhrPFPXoH3KVB0coGf6XM1WKk=", - "requires": { - "node-fetch": "^1.0.1", - "whatwg-fetch": ">=0.10.0" - } - }, - "isomorphic-ws": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/isomorphic-ws/-/isomorphic-ws-4.0.1.tgz", - "integrity": "sha512-BhBvN2MBpWTaSHdWRb/bwdZJ1WaehQ2L1KngkCkfLUGF0mAWAT1sQUQacEmQ0jXkFw/czDXPNQSL5u2/Krsz1w==" - }, - "isstream": { - "version": "0.1.2", - "resolved": 
"https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" - }, - "istanbul-lib-coverage": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.5.tgz", - "integrity": "sha512-8aXznuEPCJvGnMSRft4udDRDtb1V3pkQkMMI5LI+6HuQz5oQ4J2UFn1H82raA3qJtyOLkkwVqICBQkjnGtn5mA==", - "dev": true - }, - "istanbul-lib-hook": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/istanbul-lib-hook/-/istanbul-lib-hook-2.0.7.tgz", - "integrity": "sha512-vrRztU9VRRFDyC+aklfLoeXyNdTfga2EI3udDGn4cZ6fpSXpHLV9X6CHvfoMCPtggg8zvDDmC4b9xfu0z6/llA==", - "dev": true, - "requires": { - "append-transform": "^1.0.0" - } - }, - "istanbul-lib-instrument": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-3.3.0.tgz", - "integrity": "sha512-5nnIN4vo5xQZHdXno/YDXJ0G+I3dAm4XgzfSVTPLQpj/zAV2dV6Juy0yaf10/zrJOJeHoN3fraFe+XRq2bFVZA==", - "dev": true, - "requires": { - "@babel/generator": "^7.4.0", - "@babel/parser": "^7.4.3", - "@babel/template": "^7.4.0", - "@babel/traverse": "^7.4.3", - "@babel/types": "^7.4.0", - "istanbul-lib-coverage": "^2.0.5", - "semver": "^6.0.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true - } - } - }, - "istanbul-lib-report": { - "version": "2.0.8", - "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-2.0.8.tgz", - "integrity": "sha512-fHBeG573EIihhAblwgxrSenp0Dby6tJMFR/HvlerBsrCTD5bkUuoNtn3gVh29ZCS824cGGBPn7Sg7cNk+2xUsQ==", - "dev": true, - "requires": { - "istanbul-lib-coverage": "^2.0.5", - "make-dir": "^2.1.0", - "supports-color": "^6.1.0" - }, - "dependencies": { - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - }, - "supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "dev": true, - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "istanbul-lib-source-maps": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-3.0.6.tgz", - "integrity": "sha512-R47KzMtDJH6X4/YW9XTx+jrLnZnscW4VpNN+1PViSYTejLVPWv7oov+Duf8YQSPyVRUvueQqz1TcsC6mooZTXw==", - "dev": true, - "requires": { - "debug": "^4.1.1", - "istanbul-lib-coverage": "^2.0.5", - "make-dir": "^2.1.0", - "rimraf": "^2.6.3", - "source-map": "^0.6.1" - }, - "dependencies": { - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "pify": { - 
"version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true - } - } - }, - "istanbul-reports": { - "version": "2.2.7", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-2.2.7.tgz", - "integrity": "sha512-uu1F/L1o5Y6LzPVSVZXNOoD/KXpJue9aeLRd0sM9uMXfZvzomB0WxVamWb5ue8kA2vVWEmW7EG+A5n3f1kqHKg==", - "dev": true, - "requires": { - "html-escaper": "^2.0.0" - } - }, - "isurl": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isurl/-/isurl-1.0.0.tgz", - "integrity": "sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w==", - "requires": { - "has-to-string-tag-x": "^1.2.0", - "is-object": "^1.0.1" - } - }, - "js-sha3": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/js-sha3/-/js-sha3-0.8.0.tgz", - "integrity": "sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q==" - }, - "js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true - }, - "js-yaml": { - "version": "3.13.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", - "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - }, - "jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" - }, - "jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", - "dev": true - }, - "json-buffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=" - }, - "json-parse-better-errors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", - "dev": true - }, - "json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" - }, - "json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", - "dev": true - }, - "json-stringify-safe": { - "version": "5.0.1", - "resolved": 
"https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" - }, - "jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", - "requires": { - "graceful-fs": "^4.1.6" - } - }, - "jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", - "requires": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" - } - }, - "keyv": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.0.0.tgz", - "integrity": "sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA==", - "requires": { - "json-buffer": "3.0.0" - } - }, - "kuler": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/kuler/-/kuler-1.0.1.tgz", - "integrity": "sha512-J9nVUucG1p/skKul6DU3PUZrhs0LPulNaeUOox0IyXDi8S4CztTHs1gQphhuZmzXG7VOQSf6NJfKuzteQLv9gQ==", - "requires": { - "colornames": "^1.1.1" - } - }, - "levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", - "dev": true, - "requires": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" - } - }, - "lines-and-columns": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", - "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=", - "dev": true - }, - "load-json-file": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-6.2.0.tgz", - "integrity": "sha512-gUD/epcRms75Cw8RT1pUdHugZYM5ce64ucs2GEISABwkRsOQr0q2wm/MV2TKThycIe5e0ytRweW2RZxclogCdQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.15", - "parse-json": "^5.0.0", - "strip-bom": "^4.0.0", - "type-fest": "^0.6.0" - }, - "dependencies": { - "parse-json": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.0.0.tgz", - "integrity": "sha512-OOY5b7PAEFV0E2Fir1KOkxchnZNCdowAJgQ5NuxjpBKTRP3pQhwkrkxqQjeoKJ+fO7bCpmIZaogI4eZGDMEGOw==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1", - "lines-and-columns": "^1.1.6" - } - }, - "type-fest": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", - "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", - "dev": true - } - } - }, - "locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dev": true, - "requires": { - "p-locate": "^4.1.0" - } - }, - "lodash": { - "version": "4.17.20", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz", - "integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==" - }, - "lodash._reinterpolate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz", - "integrity": "sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0=" - }, - "lodash.camelcase": { - "version": "4.3.0", - "resolved": 
"https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=", - "dev": true - }, - "lodash.flattendeep": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz", - "integrity": "sha1-+wMJF/hqMTTlvJvsDWngAT3f7bI=", - "dev": true - }, - "lodash.get": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", - "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=", - "dev": true - }, - "lodash.kebabcase": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz", - "integrity": "sha1-hImxyw0p/4gZXM7KRI/21swpXDY=", - "dev": true - }, - "lodash.snakecase": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.snakecase/-/lodash.snakecase-4.1.1.tgz", - "integrity": "sha1-OdcUo1NXFHg3rv1ktdy7Fr7Nj40=", - "dev": true - }, - "lodash.template": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.template/-/lodash.template-4.5.0.tgz", - "integrity": "sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A==", - "requires": { - "lodash._reinterpolate": "^3.0.0", - "lodash.templatesettings": "^4.0.0" - } - }, - "lodash.templatesettings": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz", - "integrity": "sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==", - "requires": { - "lodash._reinterpolate": "^3.0.0" - } - }, - "lodash.upperfirst": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/lodash.upperfirst/-/lodash.upperfirst-4.3.1.tgz", - "integrity": "sha1-E2Xt9DFIBIHvDRxolXpe2Z1J984=", - "dev": true - }, - "lodash.zip": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.zip/-/lodash.zip-4.2.0.tgz", - "integrity": "sha1-7GZi5IlkCO1KtsVCo5kLcswIACA=", - "dev": true - }, - "logform": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/logform/-/logform-2.1.2.tgz", - "integrity": "sha512-+lZh4OpERDBLqjiwDLpAWNQu6KMjnlXH2ByZwCuSqVPJletw0kTWJf5CgSNAUKn1KUkv3m2cUz/LK8zyEy7wzQ==", - "requires": { - "colors": "^1.2.1", - "fast-safe-stringify": "^2.0.4", - "fecha": "^2.3.3", - "ms": "^2.1.1", - "triple-beam": "^1.3.0" - } - }, - "long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" - }, - "lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==" - }, - "lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "requires": { - "yallist": "^3.0.2" - } - }, - "lru-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/lru-queue/-/lru-queue-0.1.0.tgz", - "integrity": "sha1-Jzi9nw089PhEkMVzbEhpmsYyzaM=", - "requires": { - "es5-ext": "~0.10.2" - } - }, - "make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": 
"sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - "dev": true, - "requires": { - "semver": "^6.0.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true - } - } - }, - "md5.js": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", - "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", - "requires": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - "memoizee": { - "version": "0.4.14", - "resolved": "https://registry.npmjs.org/memoizee/-/memoizee-0.4.14.tgz", - "integrity": "sha512-/SWFvWegAIYAO4NQMpcX+gcra0yEZu4OntmUdrBaWrJncxOqAziGFlHxc7yjKVK2uu3lpPW27P27wkR82wA8mg==", - "requires": { - "d": "1", - "es5-ext": "^0.10.45", - "es6-weak-map": "^2.0.2", - "event-emitter": "^0.3.5", - "is-promise": "^2.1", - "lru-queue": "0.1", - "next-tick": "1", - "timers-ext": "^0.1.5" - } - }, - "merge-source-map": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/merge-source-map/-/merge-source-map-1.1.0.tgz", - "integrity": "sha512-Qkcp7P2ygktpMPh2mCQZaf3jhN6D3Z/qVZHSdWvQ+2Ef5HgRAPBO57A77+ENm0CPx2+1Ce/MYKi3ymqdfuqibw==", - "dev": true, - "requires": { - "source-map": "^0.6.1" - }, - "dependencies": { - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true - } - } - }, - "merge2": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.3.0.tgz", - "integrity": "sha512-2j4DAdlBOkiSZIsaXk4mTE3sRS02yBHAtfy127xRV3bQUFqXkjHCHLW6Scv7DwNRbIWNHH8zpnz9zMaKXIdvYw==", - "dev": true - }, - "micromatch": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.2.tgz", - "integrity": "sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q==", - "dev": true, - "requires": { - "braces": "^3.0.1", - "picomatch": "^2.0.5" - } - }, - "mime-db": { - "version": "1.44.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz", - "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==" - }, - "mime-types": { - "version": "2.1.27", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz", - "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==", - "requires": { - "mime-db": "1.44.0" - } - }, - "mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "dev": true - }, - "mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==" - }, - "minimalistic-assert": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": 
"sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" - }, - "minimalistic-crypto-utils": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", - "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=" - }, - "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", - "dev": true - }, - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dev": true, - "requires": { - "minimist": "^1.2.5" - } - }, - "mkdirp-classic": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", - "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", - "dev": true - }, - "mock-stdin": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/mock-stdin/-/mock-stdin-0.3.1.tgz", - "integrity": "sha1-xlfZZC2QeGQ1xkyl6Zu9TQm9fdM=", - "dev": true - }, - "moment": { - "version": "2.26.0", - "resolved": "https://registry.npmjs.org/moment/-/moment-2.26.0.tgz", - "integrity": "sha512-oIixUO+OamkUkwjhAVE18rAMfRJNsNe/Stid/gwHSOfHrOtw9EhAY2AHvdKZ/k/MggcYELFCJz/Sn2pL8b8JMw==" - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "mute-stream": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", - "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", - "dev": true - }, - "nan": { - "version": "2.14.1", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.1.tgz", - "integrity": "sha512-isWHgVjnFjh2x2yuJ/tj3JbwoHu3UC2dX5G/88Cm24yB6YopVgxvBObDY7n5xW6ExmFhJpSEQqFPvq9zaXc8Jw==" - }, - "natural-compare": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", - "dev": true - }, - "natural-orderby": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/natural-orderby/-/natural-orderby-2.0.3.tgz", - "integrity": "sha512-p7KTHxU0CUrcOXe62Zfrb5Z13nLvPhSWR/so3kFulUQU0sgUll2Z0LwpsLN351eOOD+hRGu/F1g+6xDfPeD++Q==", - "dev": true - }, - "nested-error-stacks": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/nested-error-stacks/-/nested-error-stacks-2.1.0.tgz", - "integrity": "sha512-AO81vsIO1k1sM4Zrd6Hu7regmJN1NSiAja10gc4bX3F0wd+9rQmcuHQaHVQCYIEC8iFXnE+mavh23GOt7wBgug==", - "dev": true - }, - "next-tick": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.1.0.tgz", - "integrity": "sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==" - }, - "nice-try": { - "version": "1.0.5", - "resolved": 
"https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" - }, - "nock": { - "version": "12.0.3", - "resolved": "https://registry.npmjs.org/nock/-/nock-12.0.3.tgz", - "integrity": "sha512-QNb/j8kbFnKCiyqi9C5DD0jH/FubFGj5rt9NQFONXwQm3IPB0CULECg/eS3AU1KgZb/6SwUa4/DTRKhVxkGABw==", - "requires": { - "debug": "^4.1.0", - "json-stringify-safe": "^5.0.1", - "lodash": "^4.17.13", - "propagate": "^2.0.0" - } - }, - "node-fetch": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-1.7.3.tgz", - "integrity": "sha512-NhZ4CsKx7cYm2vSrBAr2PvFOe6sWDf0UYLRqA6svUYg7+/TSfVAu49jYC4BvQ4Sms9SZgdqGBgroqfDhJdTyKQ==", - "requires": { - "encoding": "^0.1.11", - "is-stream": "^1.0.1" - } - }, - "node-forge": { - "version": "0.8.5", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.8.5.tgz", - "integrity": "sha512-vFMQIWt+J/7FLNyKouZ9TazT74PRV3wgv9UT4cRjC8BffxFbKXkgIWR42URCPSnHm/QDz6BOlb2Q0U4+VQT67Q==" - }, - "node-jose": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/node-jose/-/node-jose-1.1.4.tgz", - "integrity": "sha512-L31IFwL3pWWcMHxxidCY51ezqrDXMkvlT/5pLTfNw5sXmmOLJuN6ug7txzF/iuZN55cRpyOmoJrotwBQIoo5Lw==", - "requires": { - "base64url": "^3.0.1", - "browserify-zlib": "^0.2.0", - "buffer": "^5.5.0", - "es6-promise": "^4.2.8", - "lodash": "^4.17.15", - "long": "^4.0.0", - "node-forge": "^0.8.5", - "process": "^0.11.10", - "react-zlib-js": "^1.0.4", - "uuid": "^3.3.3" - } - }, - "normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "requires": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - } - }, - "normalize-url": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz", - "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==", - "requires": { - "prepend-http": "^2.0.0", - "query-string": "^5.0.1", - "sort-keys": "^2.0.0" - } - }, - "npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", - "requires": { - "path-key": "^2.0.0" - } - }, - "nyc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/nyc/-/nyc-14.1.1.tgz", - "integrity": "sha512-OI0vm6ZGUnoGZv/tLdZ2esSVzDwUC88SNs+6JoSOMVxA+gKMB8Tk7jBwgemLx4O40lhhvZCVw1C+OYLOBOPXWw==", - "dev": true, - "requires": { - "archy": "^1.0.0", - "caching-transform": "^3.0.2", - "convert-source-map": "^1.6.0", - "cp-file": "^6.2.0", - "find-cache-dir": "^2.1.0", - "find-up": "^3.0.0", - "foreground-child": "^1.5.6", - "glob": "^7.1.3", - "istanbul-lib-coverage": "^2.0.5", - "istanbul-lib-hook": "^2.0.7", - "istanbul-lib-instrument": "^3.3.0", - "istanbul-lib-report": "^2.0.8", - "istanbul-lib-source-maps": "^3.0.6", - "istanbul-reports": "^2.2.4", - "js-yaml": "^3.13.1", - "make-dir": "^2.1.0", - "merge-source-map": "^1.1.0", - "resolve-from": "^4.0.0", - "rimraf": "^2.6.3", - "signal-exit": "^3.0.2", - "spawn-wrap": "^1.4.2", - "test-exclude": "^5.2.3", - "uuid": "^3.3.2", - "yargs": "^13.2.2", - "yargs-parser": "^13.0.0" - }, - "dependencies": 
{ - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - } - } - }, - "oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==" - }, - "object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=" - }, - "object-hash": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-1.3.1.tgz", - "integrity": "sha512-OSuu/pU4ENM9kmREg0BdNrUDIl1heYa4mBZacJc+vVWz4GtAwu7jO8s4AIt2aGRUTqxykpWzI3Oqnsm13tTMDA==" - }, - "object-inspect": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.7.0.tgz", - "integrity": "sha512-a7pEHdh1xKIAgTySUGgLMx/xwDZskN1Ud6egYYN3EdRW4ZMPNEDUTF+hwy2LUC+Bl+SyLXANnwz/jyh/qutKUw==", - "dev": true - }, - "object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true - }, - "object-treeify": { - "version": "1.1.24", - "resolved": "https://registry.npmjs.org/object-treeify/-/object-treeify-1.1.24.tgz", - "integrity": "sha512-ttlIN3MoqnhevarRtDNELvNjQ85Wguq2zSkR2N9DGFM3pFWMjsz7tSqbjX7lx16BmFwLOwBa3w0TY1jJajklFg==", - "dev": true - }, - "object.assign": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", - "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", - "dev": true, - "requires": { - "define-properties": "^1.1.2", - "function-bind": "^1.1.1", - "has-symbols": "^1.0.0", - "object-keys": "^1.0.11" - } - }, - "object.values": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/object.values/-/object.values-1.1.1.tgz", - "integrity": "sha512-WTa54g2K8iu0kmS/us18jEmdv1a4Wi//BZ/DTVYEcH0XhLM5NYdpDHja3gt57VrZLcNAO2WGA+KpWsDBaHt6eA==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.0-next.1", - "function-bind": "^1.1.1", - "has": "^1.0.3" - } - }, - "oidc-token-hash": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/oidc-token-hash/-/oidc-token-hash-3.0.2.tgz", - "integrity": "sha512-dTzp80/y/da+um+i+sOucNqiPpwRL7M/xPwj7pH1TFA2/bqQ+OK2sJahSXbemEoLtPkHcFLyhLhLWZa9yW5+RA==" - }, - "once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "requires": { - "wrappy": "1" - } - }, - "one-time": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/one-time/-/one-time-0.0.4.tgz", - "integrity": "sha1-+M33eISCb+Tf+T46nMN7HkSAdC4=" - }, - "onetime": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.0.tgz", - "integrity": "sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q==", - "dev": true, - "requires": { - "mimic-fn": "^2.1.0" - } - }, - "openid-client": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-2.5.0.tgz", - "integrity": "sha512-t3hFD7xEoW1U25RyBcRFaL19fGGs6hNVTysq9pgmiltH0IVUPzH/bQV9w24pM5Q7MunnGv2/5XjIru6BQcWdxg==", - "requires": { - "base64url": "^3.0.0", - "got": "^8.3.2", - "lodash": "^4.17.11", - "lru-cache": "^5.1.1", - "node-jose": "^1.1.0", - "object-hash": "^1.3.1", - "oidc-token-hash": "^3.0.1", - "p-any": "^1.1.0" - } - }, - "optionator": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", - "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", - "dev": true, - "requires": { - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.3" - } - }, - "os-homedir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", - "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=", - "dev": true - }, - "os-tmpdir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", - "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", - "dev": true - }, - "p-any": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-any/-/p-any-1.1.0.tgz", - "integrity": "sha512-Ef0tVa4CZ5pTAmKn+Cg3w8ABBXh+hHO1aV8281dKOoUHfX+3tjG2EaFcC+aZyagg9b4EYGsHEjz21DnEE8Og2g==", - "requires": { - "p-some": "^2.0.0" - } - }, - "p-cancelable": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-0.4.1.tgz", - "integrity": "sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ==" - }, - "p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=" - }, - "p-is-promise": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-1.1.0.tgz", - "integrity": "sha1-nJRWmJ6fZYgBewQ01WCXZ1w9oF4=" - }, - "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": 
"sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dev": true, - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dev": true, - "requires": { - "p-limit": "^2.2.0" - } - }, - "p-some": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/p-some/-/p-some-2.0.1.tgz", - "integrity": "sha1-Zdh8ixVO289SIdFnd4ttLhUPbwY=", - "requires": { - "aggregate-error": "^1.0.0" - } - }, - "p-timeout": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-2.0.1.tgz", - "integrity": "sha512-88em58dDVB/KzPEx1X0N3LwFfYZPyDc4B6eF38M1rk9VTZMbxXXgjugz8mmwpS9Ox4BDZ+t6t3QP5+/gazweIA==", - "requires": { - "p-finally": "^1.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true - }, - "package-hash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/package-hash/-/package-hash-3.0.0.tgz", - "integrity": "sha512-lOtmukMDVvtkL84rJHI7dpTYq+0rli8N2wlnqUcBuDWCfVhRUfOmnR9SsoHFMLpACvEV60dX7rd0rFaYDZI+FA==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.15", - "hasha": "^3.0.0", - "lodash.flattendeep": "^4.4.0", - "release-zalgo": "^1.0.0" - } - }, - "pako": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", - "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" - }, - "parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "dev": true, - "requires": { - "callsites": "^3.0.0" - } - }, - "parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", - "dev": true, - "requires": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - } - }, - "password-prompt": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/password-prompt/-/password-prompt-1.1.2.tgz", - "integrity": "sha512-bpuBhROdrhuN3E7G/koAju0WjVw9/uQOG5Co5mokNj0MiOSBVZS1JTwM4zl55hu0WFmIEFvO9cU9sJQiBIYeIA==", - "dev": true, - "requires": { - "ansi-escapes": "^3.1.0", - "cross-spawn": "^6.0.5" - }, - "dependencies": { - "ansi-escapes": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.2.0.tgz", - "integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==", - "dev": true - } - } - }, - "path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true - }, - "path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" - }, - "path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - 
"integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" - }, - "path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" - }, - "path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true - }, - "pathval": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.0.tgz", - "integrity": "sha1-uULm1L3mUwBe9rcTYd74cn0GReA=", - "dev": true - }, - "pbkdf2": { - "version": "3.0.17", - "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.17.tgz", - "integrity": "sha512-U/il5MsrZp7mGg3mSQfn742na2T+1/vHDCG5/iTI3X9MKUuYUZVLQhyRsg06mCgDBTd57TxzgZt7P+fYfjRLtA==", - "requires": { - "create-hash": "^1.1.2", - "create-hmac": "^1.1.4", - "ripemd160": "^2.0.1", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - } - }, - "performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" - }, - "picomatch": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.2.tgz", - "integrity": "sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg==", - "dev": true - }, - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=" - }, - "pkg-dir": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", - "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", - "dev": true, - "requires": { - "find-up": "^4.0.0" - } - }, - "prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", - "dev": true - }, - "prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=" - }, - "process": { - "version": "0.11.10", - "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", - "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=" - }, - "process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" - }, - "progress": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", - "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", - "dev": true - }, - "propagate": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/propagate/-/propagate-2.0.1.tgz", - "integrity": "sha512-vGrhOavPSTz4QVNuBNdcNXePNdNMaO1xj9yBeH1ScQPjk/rhg9sSlCXPhMkFuaNNW/syTvYqsnbIJxMBfRbbag==" - }, - "pseudomap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", - "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=", - "dev": true - }, - "psl": { - "version": "1.8.0", - "resolved": 
"https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", - "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" - }, - "pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" - }, - "qqjs": { - "version": "0.3.11", - "resolved": "https://registry.npmjs.org/qqjs/-/qqjs-0.3.11.tgz", - "integrity": "sha512-pB2X5AduTl78J+xRSxQiEmga1jQV0j43jOPs/MTgTLApGFEOn6NgdE2dEjp7nvDtjkIOZbvFIojAiYUx6ep3zg==", - "dev": true, - "requires": { - "chalk": "^2.4.1", - "debug": "^4.1.1", - "execa": "^0.10.0", - "fs-extra": "^6.0.1", - "get-stream": "^5.1.0", - "glob": "^7.1.2", - "globby": "^10.0.1", - "http-call": "^5.1.2", - "load-json-file": "^6.2.0", - "pkg-dir": "^4.2.0", - "tar-fs": "^2.0.0", - "tmp": "^0.1.0", - "write-json-file": "^4.1.1" - }, - "dependencies": { - "execa": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.10.0.tgz", - "integrity": "sha512-7XOMnz8Ynx1gGo/3hyV9loYNPWM94jG3+3T3Y8tsfSstFmETmENCMU/A/zj8Lyaj1lkgEepKepvd6240tBRvlw==", - "dev": true, - "requires": { - "cross-spawn": "^6.0.0", - "get-stream": "^3.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - }, - "dependencies": { - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=", - "dev": true - } - } - }, - "fs-extra": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-6.0.1.tgz", - "integrity": "sha512-GnyIkKhhzXZUWFCaJzvyDLEEgDkPfb4/TPvJCJVuS8MWZgoSsErf++QpiAlDnKFcqhRlm+tIOcencCjyJE6ZCA==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - } - }, - "get-stream": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.1.0.tgz", - "integrity": "sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw==", - "dev": true, - "requires": { - "pump": "^3.0.0" - } - } - } - }, - "qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" - }, - "query-string": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz", - "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==", - "requires": { - "decode-uri-component": "^0.2.0", - "object-assign": "^4.1.0", - "strict-uri-encode": "^1.0.0" - } - }, - "ramda": { - "version": "0.26.1", - "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.26.1.tgz", - "integrity": "sha512-hLWjpy7EnsDBb0p+Z3B7rPi3GDeRG5ZtiI33kJhTt+ORCd38AbAIjB/9zRIUoeTbE/AVX5ZkU7m6bznsvrf8eQ==", - "dev": true - }, - "randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": 
"sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "requires": { - "safe-buffer": "^5.1.0" - } - }, - "react-zlib-js": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/react-zlib-js/-/react-zlib-js-1.0.4.tgz", - "integrity": "sha512-ynXD9DFxpE7vtGoa3ZwBtPmZrkZYw2plzHGbanUjBOSN4RtuXdektSfABykHtTiWEHMh7WdYj45LHtp228ZF1A==" - }, - "read-pkg": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", - "integrity": "sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=", - "dev": true, - "requires": { - "load-json-file": "^4.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^3.0.0" - }, - "dependencies": { - "load-json-file": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz", - "integrity": "sha1-L19Fq5HjMhYjT9U62rZo607AmTs=", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "parse-json": "^4.0.0", - "pify": "^3.0.0", - "strip-bom": "^3.0.0" - } - }, - "path-type": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", - "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", - "dev": true, - "requires": { - "pify": "^3.0.0" - } - }, - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true - } - } - }, - "read-pkg-up": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-4.0.0.tgz", - "integrity": "sha512-6etQSH7nJGsK0RbG/2TeDzZFa8shjQ1um+SwQQ5cwKy0dhSXdOncEhb1CPpvQG4h7FyOV6EB6YlV0yJvZQNAkA==", - "dev": true, - "requires": { - "find-up": "^3.0.0", - "read-pkg": "^3.0.0" - }, - "dependencies": { - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - } - } - }, - "readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - 
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - } - } - }, - "rechoir": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", - "integrity": "sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q=", - "requires": { - "resolve": "^1.1.6" - } - }, - "redeyed": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/redeyed/-/redeyed-2.1.1.tgz", - "integrity": "sha1-iYS1gV2ZyyIEacme7v/jiRPmzAs=", - "dev": true, - "requires": { - "esprima": "~4.0.0" - } - }, - "regenerator-runtime": { - "version": "0.13.5", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.5.tgz", - "integrity": "sha512-ZS5w8CpKFinUzOwW3c83oPeVXoNsrLsaCoLtJvAClH135j/R77RuymhiSErhm2lKcwSCIpmvIWSbDkIfAqKQlA==" - }, - "regexpp": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", - "integrity": "sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==", - "dev": true - }, - "release-zalgo": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/release-zalgo/-/release-zalgo-1.0.0.tgz", - "integrity": "sha1-CXALflB0Mpc5Mw5TXFqQ+2eFFzA=", - "dev": true, - "requires": { - "es6-error": "^4.0.1" - } - }, - "request": { - "version": "2.88.2", - "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", - "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", - "requires": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - "dependencies": { - "form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - } - } - } - }, - "require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "dev": true - }, - "require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", - "dev": true - }, - "resolve": { - "version": "1.17.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.17.0.tgz", - "integrity": "sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w==", - "requires": { - "path-parse": "^1.0.6" - } - }, - "resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true - }, - "responselike": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", - "requires": { - "lowercase-keys": "^1.0.0" - } - }, - "restore-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", - "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", - "dev": true, - "requires": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - } - }, - "ret": { - "version": "0.1.15", - "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", - "dev": true - }, - "reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "dev": true - }, - "rfc4648": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/rfc4648/-/rfc4648-1.3.0.tgz", - "integrity": "sha512-x36K12jOflpm1V8QjPq3I+pt7Z1xzeZIjiC8J2Oxd7bE1efTrOG241DTYVJByP/SxR9jl1t7iZqYxDX864jgBQ==" - }, - "rimraf": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", - "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", - "dev": true, - "requires": { - "glob": "^7.1.3" - } - }, - "ripemd160": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", - "requires": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1" - } - }, - "run-async": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", - "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", - "dev": true - }, - "run-parallel": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.1.9.tgz", - "integrity": "sha512-DEqnSRTDw/Tc3FXf49zedI638Z9onwUotBMiUFKmrO2sdFKIbXamXGQ3Axd4qgphxKB4kw/qP1w5kTxnfU1B9Q==", - "dev": true - }, - "rxjs": { - "version": "6.5.5", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.5.5.tgz", - "integrity": "sha512-WfQI+1gohdf0Dai/Bbmk5L5ItH5tYqm3ki2c5GdWhKjalzjg93N3avFjVStyZZz+A2Em+ZxKH5bNghw9UeylGQ==", - "requires": { - "tslib": "^1.9.0" - } - }, - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "safe-regex": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", - "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", - "dev": true, - "requires": { - "ret": "~0.1.10" - } - }, - "safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - }, - "set-blocking": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", - "dev": true - }, - "sha.js": { - "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", - "requires": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", - "requires": { - "shebang-regex": "^1.0.0" - } - }, - "shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=" - }, - "shelljs": { - "version": "0.8.4", - "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.4.tgz", - "integrity": "sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ==", - "requires": { - "glob": "^7.0.0", - "interpret": "^1.0.0", - "rechoir": "^0.6.2" - } - }, - "signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" - }, - "simple-swizzle": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", - "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=", - "requires": { - "is-arrayish": "^0.3.1" - }, - "dependencies": { - "is-arrayish": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" - } - } - }, - "slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "dev": true - }, - "slice-ansi": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", - "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.0", - "astral-regex": "^1.0.0", - "is-fullwidth-code-point": "^2.0.0" - } - }, - "sort-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz", - "integrity": "sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg=", - "requires": { - "is-plain-obj": "^1.0.0" - } - }, - "source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "dev": true - }, - "spawn-wrap": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/spawn-wrap/-/spawn-wrap-1.4.3.tgz", - "integrity": "sha512-IgB8md0QW/+tWqcavuFgKYR/qIRvJkRLPJDFaoXtLLUaVcCDK0+HeFTkmQHj3eprcYhc+gOl0aEA1w7qZlYezw==", - "dev": true, - "requires": { - "foreground-child": "^1.5.6", - "mkdirp": "^0.5.0", - "os-homedir": "^1.0.1", - "rimraf": "^2.6.2", - "signal-exit": "^3.0.2", - "which": "^1.3.0" - } - }, - "spdx-correct": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.0.tgz", - "integrity": 
"sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q==", - "dev": true, - "requires": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", - "dev": true - }, - "spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "dev": true, - "requires": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-license-ids": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz", - "integrity": "sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q==", - "dev": true - }, - "sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" - }, - "sshpk": { - "version": "1.16.1", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", - "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", - "requires": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - } - }, - "stack-trace": { - "version": "0.0.10", - "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", - "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=" - }, - "stdout-stderr": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/stdout-stderr/-/stdout-stderr-0.1.13.tgz", - "integrity": "sha512-Xnt9/HHHYfjZ7NeQLvuQDyL1LnbsbddgMFKCuaQKwGCdJm8LnstZIXop+uOY36UR1UXXoHXfMbC1KlVdVd2JLA==", - "dev": true, - "requires": { - "debug": "^4.1.1", - "strip-ansi": "^6.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - } - } - }, - "strict-uri-encode": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", - "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=" - }, - "string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=" - }, - "strip-ansi": 
{ - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "requires": { - "ansi-regex": "^3.0.0" - } - } - } - }, - "string.prototype.trimend": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.1.tgz", - "integrity": "sha512-LRPxFUaTtpqYsTeNKaFOw3R4bxIzWOnbQ837QfBylo8jIxtcbK/A/sMV7Q+OAV/vWo+7s25pOE10KYSjaSO06g==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5" - } - }, - "string.prototype.trimleft": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/string.prototype.trimleft/-/string.prototype.trimleft-2.1.2.tgz", - "integrity": "sha512-gCA0tza1JBvqr3bfAIFJGqfdRTyPae82+KTnm3coDXkZN9wnuW3HjGgN386D7hfv5CHQYCI022/rJPVlqXyHSw==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5", - "string.prototype.trimstart": "^1.0.0" - } - }, - "string.prototype.trimright": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/string.prototype.trimright/-/string.prototype.trimright-2.1.2.tgz", - "integrity": "sha512-ZNRQ7sY3KroTaYjRS6EbNiiHrOkjihL9aQE/8gfQ4DtAC/aEBRHFJa44OmoWxGGqXuJlfKkZW4WcXErGr+9ZFg==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5", - "string.prototype.trimend": "^1.0.0" - } - }, - "string.prototype.trimstart": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.1.tgz", - "integrity": "sha512-XxZn+QpvrBI1FOcg6dIpxUPgWCPuNXvMD72aaRaUQv1eD4e/Qy8i/hFTe0BUmD60p/QA6bh1avmuPTfNjqVWRw==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - } - } - }, - "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "requires": { - "ansi-regex": "^4.1.0" - } - }, - "strip-bom": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", - "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", - "dev": true - }, - "strip-eof": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=" - }, - "strip-json-comments": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.0.tgz", - "integrity": "sha512-e6/d0eBu7gHtdCqFt0xJr642LdToM5/cN4Qb9DbHjVx1CP5RyeM+zH7pbecEmDv/lBqb0QH+6Uqq75rxFPkM0w==", - "dev": true - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": 
"sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - }, - "supports-hyperlinks": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-1.0.1.tgz", - "integrity": "sha512-HHi5kVSefKaJkGYXbDuKbUGRVxqnWGn3J2e39CYcNJEfWciGq2zYtOhXLTlvrOZW1QU7VX67w7fMmWafHX9Pfw==", - "dev": true, - "requires": { - "has-flag": "^2.0.0", - "supports-color": "^5.0.0" - }, - "dependencies": { - "has-flag": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-2.0.0.tgz", - "integrity": "sha1-6CB68cx7MNRGzHC3NLXovhj4jVE=", - "dev": true - } - } - }, - "table": { - "version": "5.4.6", - "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz", - "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==", - "dev": true, - "requires": { - "ajv": "^6.10.2", - "lodash": "^4.17.14", - "slice-ansi": "^2.1.0", - "string-width": "^3.0.0" - }, - "dependencies": { - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - } - } - }, - "tar-fs": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.0.tgz", - "integrity": "sha512-9uW5iDvrIMCVpvasdFHW0wJPez0K4JnMZtsuIeDI7HyMGJNxmDZDOCQROr7lXyS+iL/QMpj07qcjGYTSdRFXUg==", - "dev": true, - "requires": { - "chownr": "^1.1.1", - "mkdirp-classic": "^0.5.2", - "pump": "^3.0.0", - "tar-stream": "^2.0.0" - } - }, - "tar-stream": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.1.2.tgz", - "integrity": "sha512-UaF6FoJ32WqALZGOIAApXx+OdxhekNMChu6axLJR85zMMjXKWFGjbIRe+J6P4UnRGg9rAwWvbTT0oI7hD/Un7Q==", - "dev": true, - "requires": { - "bl": "^4.0.1", - "end-of-stream": "^1.4.1", - "fs-constants": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^3.1.1" - }, - "dependencies": { - "readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dev": true, - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - } - } - }, - "test-exclude": { - "version": "5.2.3", - "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-5.2.3.tgz", - "integrity": "sha512-M+oxtseCFO3EDtAaGH7iiej3CBkzXqFMbzqYAACdzKui4eZA+pq3tZEwChvOdNfa7xxy8BfbmgJSIr43cC/+2g==", - "dev": true, - "requires": { - "glob": "^7.1.3", - "minimatch": "^3.0.4", - "read-pkg-up": "^4.0.0", - "require-main-filename": "^2.0.0" - } - }, - "text-hex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz", - "integrity": "sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==" - }, - "text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", - "dev": true - }, - "through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": 
"sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", - "dev": true - }, - "timed-out": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-4.0.1.tgz", - "integrity": "sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8=" - }, - "timers-ext": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/timers-ext/-/timers-ext-0.1.7.tgz", - "integrity": "sha512-b85NUNzTSdodShTIbky6ZF02e8STtVVfD+fu4aXXShEELpozH+bCpJLYMPZbsABN2wDH7fJpqIoXxJpzbf0NqQ==", - "requires": { - "es5-ext": "~0.10.46", - "next-tick": "1" - } - }, - "tmp": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.1.0.tgz", - "integrity": "sha512-J7Z2K08jbGcdA1kkQpJSqLF6T0tdQqpR2pnSUXsIchbPdTI9v3e85cLW0d6WDhwuAleOV71j2xWs8qMPfK7nKw==", - "dev": true, - "requires": { - "rimraf": "^2.6.3" - } - }, - "to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", - "dev": true - }, - "to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, - "requires": { - "is-number": "^7.0.0" - } - }, - "tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "requires": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - } - }, - "triple-beam": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.3.0.tgz", - "integrity": "sha512-XrHUvV5HpdLmIj4uVMxHggLbFSZYIn7HEWsqePZcI50pco+MPqJ50wMGY794X7AOOhxOBAjbkqfAbEe/QMp2Lw==" - }, - "tslib": { - "version": "1.13.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.13.0.tgz", - "integrity": "sha512-i/6DQjL8Xf3be4K/E6Wgpekn5Qasl1usyw++dAA35Ue5orEn65VIxOA+YvNNl9HV3qv70T7CNwjODHZrLwvd1Q==" - }, - "tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" - }, - "type": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", - "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==" - }, - "type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", - "dev": true, - "requires": { - "prelude-ls": "^1.2.1" - } - }, - "type-detect": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", - "dev": true - }, - "type-fest": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.11.0.tgz", - "integrity": "sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ==", - "dev": true - }, - "typedarray-to-buffer": { - "version": 
"3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", - "requires": { - "is-typedarray": "^1.0.0" - } - }, - "underscore": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.10.2.tgz", - "integrity": "sha512-N4P+Q/BuyuEKFJ43B9gYuOj4TQUHXX+j2FqguVOpjkssLUUrnJofCcBccJSCoeturDoZU6GorDTHSvUDlSQbTg==" - }, - "universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==" - }, - "unorm": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/unorm/-/unorm-1.6.0.tgz", - "integrity": "sha512-b2/KCUlYZUeA7JFUuRJZPUtr4gZvBh7tavtv4fvk4+KV9pfGiR6CQAQAWl49ZpR3ts2dk4FYkP7EIgDJoiOLDA==" - }, - "uri-js": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", - "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", - "requires": { - "punycode": "^2.1.0" - } - }, - "url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", - "requires": { - "prepend-http": "^2.0.0" - } - }, - "url-to-options": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/url-to-options/-/url-to-options-1.0.1.tgz", - "integrity": "sha1-FQWgOiiaSMvXpDTvuu7FBV9WM6k=" - }, - "util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" - }, - "uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" - }, - "v8-compile-cache": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.1.0.tgz", - "integrity": "sha512-usZBT3PW+LOjM25wbqIlZwPeJV+3OSz3M1k1Ws8snlW39dZyYL9lOGC5FgPVHfk0jKmjiDV8Z0mIbVQPiwFs7g==", - "dev": true - }, - "validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "dev": true, - "requires": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", - "requires": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "websocket": { - "version": "1.0.31", - "resolved": "https://registry.npmjs.org/websocket/-/websocket-1.0.31.tgz", - "integrity": "sha512-VAouplvGKPiKFDTeCCO65vYHsyay8DqoBSlzIO3fayrfOgU94lQN5a1uWVnFrMLceTJw/+fQXR5PGbUVRaHshQ==", - "requires": { - "debug": "^2.2.0", - "es5-ext": "^0.10.50", - "nan": "^2.14.0", - "typedarray-to-buffer": "^3.1.5", - "yaeti": "^0.0.6" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - } - } - }, - "whatwg-fetch": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.0.0.tgz", - "integrity": "sha512-9GSJUgz1D4MfyKU7KRqwOjXCXTqWdFNvEr7eUBYchQiVc744mqK/MzXPNR2WsPkmkOa4ywfg8C2n8h+13Bey1Q==" - }, - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "requires": { - "isexe": "^2.0.0" - } - }, - "which-module": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", - "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", - "dev": true - }, - "widest-line": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-2.0.1.tgz", - "integrity": "sha512-Ba5m9/Fa4Xt9eb2ELXt77JxVDV8w7qQrH0zS/TWSJdLyAwQjWoOzpzj5lwVftDz6n/EOu3tNACS84v509qwnJA==", - "requires": { - "string-width": "^2.1.1" - } - }, - "winston": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/winston/-/winston-3.2.1.tgz", - "integrity": "sha512-zU6vgnS9dAWCEKg/QYigd6cgMVVNwyTzKs81XZtTFuRwJOcDdBg7AU0mXVyNbs7O5RH2zdv+BdNZUlx7mXPuOw==", - "requires": { - "async": "^2.6.1", - "diagnostics": "^1.1.1", - "is-stream": "^1.1.0", - "logform": "^2.1.1", - "one-time": "0.0.4", - "readable-stream": "^3.1.1", - "stack-trace": "0.0.x", - "triple-beam": "^1.3.0", - "winston-transport": "^4.3.0" - }, - "dependencies": { - "readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - } - } - }, - "winston-transport": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.3.0.tgz", - "integrity": "sha512-B2wPuwUi3vhzn/51Uukcao4dIduEiPOcOt9HJ3QeaXgkJ5Z7UwpBzxS4ZGNHtrxrUvTwemsQiSys0ihOf8Mp1A==", - "requires": { - "readable-stream": "^2.3.6", - "triple-beam": "^1.2.0" - } - }, - "word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", - "dev": true - }, - "wrap-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-4.0.0.tgz", - "integrity": "sha512-uMTsj9rDb0/7kk1PbcbCcwvHUxp60fGDB/NNXpVa0Q+ic/e7y5+BwTxKfQ33VYgDppSwi/FBzpetYzo8s6tfbg==", - "requires": { - "ansi-styles": "^3.2.0", - "string-width": "^2.1.1", - "strip-ansi": "^4.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=" - }, - "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "requires": { - "ansi-regex": "^3.0.0" - } - } - } - }, - "wrappy": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" - }, - "write": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz", - "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==", - "dev": true, - "requires": { - "mkdirp": "^0.5.1" - } - }, - "write-file-atomic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", - "dev": true, - "requires": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "write-json-file": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/write-json-file/-/write-json-file-4.3.0.tgz", - "integrity": "sha512-PxiShnxf0IlnQuMYOPPhPkhExoCQuTUNPOa/2JWCYTmBquU9njyyDuwRKN26IZBlp4yn1nt+Agh2HOOBl+55HQ==", - "dev": true, - "requires": { - "detect-indent": "^6.0.0", - "graceful-fs": "^4.1.15", - "is-plain-obj": "^2.0.0", - "make-dir": "^3.0.0", - "sort-keys": "^4.0.0", - "write-file-atomic": "^3.0.0" - }, - "dependencies": { - "is-plain-obj": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", - "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", - "dev": true - }, - "sort-keys": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-4.0.0.tgz", - "integrity": "sha512-hlJLzrn/VN49uyNkZ8+9b+0q9DjmmYcYOnbMQtpkLrYpPwRApDPZfmqbUfJnAA3sb/nRib+nDot7Zi/1ER1fuA==", - "dev": true, - "requires": { - "is-plain-obj": "^2.0.0" - } - } - } - }, - "ws": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz", - "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==", - "requires": { - "async-limiter": "~1.0.0" - } - }, - "xxhashjs": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/xxhashjs/-/xxhashjs-0.2.2.tgz", - "integrity": "sha512-AkTuIuVTET12tpsVIQo+ZU6f/qDmKuRUcjaqR+OIvm+aCBsZ95i7UVY5WJ9TMsSaZ0DA2WxoZ4acu0sPH+OKAw==", - "requires": { - "cuint": "^0.2.2" - } - }, - "y18n": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz", - "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==", - "dev": true - }, - "yaeti": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/yaeti/-/yaeti-0.0.6.tgz", - "integrity": "sha1-8m9ITXJoTPQr7ft2lwqhYI+/lXc=" - }, - "yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" - }, - "yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", - "dev": true, - "requires": { - "cliui": "^5.0.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^13.1.2" - }, - "dependencies": { - "find-up": { - 
"version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - } - } - }, - "yargs-parser": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", - "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", - "dev": true, - "requires": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - } - } -} diff --git a/.maintain/chaostest/package.json b/.maintain/chaostest/package.json deleted file mode 100644 index b659f75181113..0000000000000 --- a/.maintain/chaostest/package.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "name": "chaostest", - "description": "A cli for chaos testing on substrate", - "version": "0.0.0", - "author": "HarryHong", - "bin": { - "chaostest": "./bin/run" - }, - "bugs": "https://github.com/paritytech/substrate/issues", - "dependencies": { - "@kubernetes/client-node": "^0.11.1", - "@oclif/command": "^1", - "@oclif/config": "^1", - "@oclif/plugin-help": "^2", - "@polkadot/api": "^0.95.0-beta.14", - "@polkadot/keyring": "^1.6.0-beta.9", - "winston": "^3.2.1" - }, - "devDependencies": { - "@oclif/dev-cli": "^1", - "@oclif/test": "^1", - "chai": "^4", - "eslint": "^7.1.0", - "eslint-config-oclif": "^3.1", - "eslint-config-standard": "^14.1.1", - "eslint-plugin-import": "^2.20.2", - "eslint-plugin-node": "^11.1.0", - "eslint-plugin-promise": "^4.2.1", - "eslint-plugin-standard": "^4.0.1", - "globby": "^10", - "nyc": "^14" - }, - "engines": { - "node": ">=8.0.0" - }, - "files": [ - "/bin", - "/npm-shrinkwrap.json", - "/oclif.manifest.json", - "/src" - ], - "homepage": "https://github.com/paritytech/substrate/tree/master/.maintain/chaostest", - "keywords": [ - "oclif" - ], - "main": "src/index.js", - "oclif": { - "commands": "./src/commands", - "bin": "chaostest", - "plugins": [ - "@oclif/plugin-help" - ] - }, - "repository": "https://github.com/paritytech/substrate/tree/master/.maintain/chaostest", - "scripts": { - "postpack": "rm -f oclif.manifest.json", - "posttest": "eslint .", - "prepack": "oclif-dev manifest && oclif-dev readme", - "version": "oclif-dev readme && git add README.md" - } -} diff --git 
a/.maintain/chaostest/src/commands/clean/index.js b/.maintain/chaostest/src/commands/clean/index.js deleted file mode 100644 index 9f8f5b95f8978..0000000000000 --- a/.maintain/chaostest/src/commands/clean/index.js +++ /dev/null @@ -1,31 +0,0 @@ -const { Command, flags } = require('@oclif/command') -const CONFIG = require('../../config')() -const logger = require('../../utils/logger') -const Hypervisor = require('../../hypervisor') - -class CleanCommand extends Command { - async run () { - const { flags } = this.parse(CleanCommand) - const namespace = flags.namespace || CONFIG.namespace - const hypervisor = new Hypervisor(CONFIG) - // Delete corresponding namespace, default to CONFIG.namespace - try { - if (namespace) { - await hypervisor.cleanup(namespace) - } else { - logger.debug('Nothing to clean up') - } - } catch (error) { - logger.error(error) - process.exit(1) - } - } -} - -CleanCommand.description = 'Clean up resources based on namespace' - -CleanCommand.flags = { - namespace: flags.string({ char: 'n', description: 'desired namespace to clean up', env: 'NAMESPACE' }) -} - -module.exports = CleanCommand diff --git a/.maintain/chaostest/src/commands/singlenodeheight/index.js b/.maintain/chaostest/src/commands/singlenodeheight/index.js deleted file mode 100644 index 05006d745b4e2..0000000000000 --- a/.maintain/chaostest/src/commands/singlenodeheight/index.js +++ /dev/null @@ -1,63 +0,0 @@ -const { Command, flags } = require('@oclif/command') -const CONFIG = require('../../config')() -const { succeedExit, errorExit } = require('../../utils/exit') -const Hypervisor = require('../../hypervisor') -const logger = require('../../utils/logger') - -class SingleNodeHeightCommand extends Command { - async run () { - const { flags } = this.parse(SingleNodeHeightCommand) - let port = flags.port - let url = flags.url - const wait = flags.wait || 600 * 1000 - const height = flags.height || 10 - const namespace = flags.namespace || CONFIG.namespace - const pod = flags.pod || (CONFIG.nodes && CONFIG.nodes[0]) ? CONFIG.nodes[0].podName : undefined - const now = Date.now() - - const hypervisor = new Hypervisor(CONFIG) - if (!!url && !!port) { - JsonRpcCallTestHeight(url, port) - } else if (!!pod && !!namespace) { - url = 'http://127.0.0.1' - port = 9933 - await hypervisor.startForwardServer(namespace, pod, port) - JsonRpcCallTestHeight(url, port) - } else { - errorExit('Not enough parameters provided. 
Either specify url and port or pod and namespace.') - } - - async function JsonRpcCallTestHeight (url, port) { - logger.debug('Polling chain height...') - if (Date.now() < now + wait) { - try { - const curHeight = await hypervisor.getChainBlockHeight(url, port) - logger.debug('Current Block Height: ' + curHeight) - if (curHeight > height) { - logger.info(`Single dev node Blockheight reached ${height}`) - succeedExit() - } else { - setTimeout(() => JsonRpcCallTestHeight(url, port), 2000) - } - } catch (error) { - errorExit('Error requesting chain block height', error) - } - } else { - errorExit('Timed out') - } - } - } -} - -SingleNodeHeightCommand.description = 'Test if targeted node is producing blocks > certain height' - -SingleNodeHeightCommand.flags = { - port: flags.integer({ char: 'p', description: 'port to deploy' }), - url: flags.string({ char: 'u', description: 'connect url' }), - timeout: flags.string({ char: 't', description: 'wait time in miliseconds to halt' }), - height: flags.string({ char: 'h', description: 'desired height to test' }), - pod: flags.string({ description: 'desired pod to test' }), - namespace: flags.string({ description: 'desired namespace to test' }) -} - -module.exports = SingleNodeHeightCommand diff --git a/.maintain/chaostest/src/commands/spawn/index.js b/.maintain/chaostest/src/commands/spawn/index.js deleted file mode 100644 index 785037b029536..0000000000000 --- a/.maintain/chaostest/src/commands/spawn/index.js +++ /dev/null @@ -1,52 +0,0 @@ -const { Command, flags } = require('@oclif/command') -const logger = require('../../utils/logger') -const Hypervisor = require('../../hypervisor') -const CONFIG = require('../../config')() - -class SpawnCommand extends Command { - async run () { - const { flags } = this.parse(SpawnCommand) - const { args } = this.parse(SpawnCommand) - const imageTag = flags.image || 'parity/substrate:latest' - const port = flags.port || 9933 - const namespace = flags.namespace || 'substrate-ci' - const validator = flags.validator || 0 - const node = flags.node || 1 - - const hypervisor = new Hypervisor(CONFIG) - try { - // Check/Create namespace - await hypervisor.readOrCreateNamespace(namespace) - const chainName = args.chainName - if (chainName) { - if (chainName === 'dev') { - logger.debug('Starting a fullnode in dev mode...') - await hypervisor.createDevNode(imageTag, port) - } else if (chainName === 'alicebob') { - await hypervisor.createAliceBobNodes(imageTag, port) - } else { - // TODO: customized chain with chainName - } - } - } catch (error) { - logger.error(error) - process.exit(1) - } - } -} - -SpawnCommand.description = 'Spawn a local testnet with options' - -SpawnCommand.flags = { - image: flags.string({ char: 'i', description: 'image to deploy' }), - port: flags.integer({ char: 'p', description: 'port to deploy on' }), - namespace: flags.string({ description: 'desired namespace to deploy to', env: 'NAMESPACE' }), - validator: flags.string({ char: 'v', description: 'number of validators' }), - node: flags.string({ char: 'n', description: 'number of full nodes, if not set but exists, default to 1' }), - key: flags.string({ char: 'k', description: 'number of full nodes, if not set but exists, default to 1' }), - chainspec: flags.string({ char: 'c', description: 'number of full nodes, if not set but exists, default to 1' }) -} - -SpawnCommand.args = [{ name: 'chainName' }] - -module.exports = SpawnCommand diff --git a/.maintain/chaostest/src/config/README.md b/.maintain/chaostest/src/config/README.md deleted file mode 
100644 index 655e6deacb376..0000000000000 --- a/.maintain/chaostest/src/config/README.md +++ /dev/null @@ -1,34 +0,0 @@ -chaostest CONFIG -========= - -Since deployment can behave differently, we want to keep a state between phases including different test subjects. - -# Content -The state could include informations such as: -``` -{ - namespace, - image, - bootnode: { - podname, - ip, - port, - peerId, - privateKey, - publicKey - }, - nodes: [{ - podname, - ip, - port, - nodeType: 'validator' | 'bootnode' | , - privateKey (validator only), - publicKey (validator only) - }] -} -``` - -# TODO -k8s configuration -chainspec -chaos-agent diff --git a/.maintain/chaostest/src/config/index.js b/.maintain/chaostest/src/config/index.js deleted file mode 100644 index 400597c2bddcc..0000000000000 --- a/.maintain/chaostest/src/config/index.js +++ /dev/null @@ -1,70 +0,0 @@ -const fs = require('fs') -const path = require('path') -const configPath = path.join(__dirname, './config.json') -const logger = require('../utils/logger') - -class Config { - constructor () { - this.load() - } - - async load () { - fs.readFile(configPath, (err, data) => { - if (err) { - if (err.code === 'ENOENT') { - this.reset() - } else { - throw err - } - } else { - try { - Object.assign(this, JSON.parse(data)) - } catch (error) { - logger.error('config file is corrupted, resetting...') - this.reset() - } - }; - }) - }; - - getConfig () { - return this - } - - async update () { - const data = JSON.stringify(this.getConfig()) - fs.writeFile(configPath, data, (err) => { - if (err) throw err - logger.debug('Configuration updated') - }) - } - - async setNamespace (namespace) { - this.namespace = namespace - this.update() - } - - async addNode (node) { - if (!this.nodes || Array.isArray(this.nodes)) { - this.nodes = [] - } - if (node.nodeType === 'bootnode') { - this.bootnode = node - } - this.nodes.push(node) - this.update() - } - - async reset () { - const data = JSON.stringify({}) - fs.writeFile(configPath, data, (err) => { - if (err) throw err - this.load() - }) - } -} - -module.exports = () => { - const config = new Config() - return config -} diff --git a/.maintain/chaostest/src/hypervisor/chainApi/api.js b/.maintain/chaostest/src/hypervisor/chainApi/api.js deleted file mode 100644 index f9265b6386ee4..0000000000000 --- a/.maintain/chaostest/src/hypervisor/chainApi/api.js +++ /dev/null @@ -1,16 +0,0 @@ -const chainApi = require('../modules/chainApi') - -exports.getApi = async function (endpoint) { - if (this._apiInstance && this._apiInstance.endpoint === endpoint) { - return this._apiInstance.instance - } else { - const instance = await chainApi.getApi(endpoint) - this._apiInstance = { endpoint, instance } - return instance - } -} - -exports.getChainBlockHeight = async function (url, port) { - const api = await this.getApi(url + ':' + port) - return chainApi.getChainBlockHeight(api) -} diff --git a/.maintain/chaostest/src/hypervisor/chainApi/index.js b/.maintain/chaostest/src/hypervisor/chainApi/index.js deleted file mode 100644 index c0802401d9182..0000000000000 --- a/.maintain/chaostest/src/hypervisor/chainApi/index.js +++ /dev/null @@ -1,4 +0,0 @@ -const api = require('./api') -module.exports = function (Hypervisor) { - Object.assign(Hypervisor.prototype, api) -} diff --git a/.maintain/chaostest/src/hypervisor/deployment/deployment.js b/.maintain/chaostest/src/hypervisor/deployment/deployment.js deleted file mode 100644 index 906734393af67..0000000000000 --- a/.maintain/chaostest/src/hypervisor/deployment/deployment.js +++ 
/dev/null @@ -1,123 +0,0 @@ -const k8s = require('../modules/k8s') -const { pollUntil } = require('../../utils/wait') -const { getBootNodeUrl } = require('../../utils') -const logger = require('../../utils/logger') - -exports.readOrCreateNamespace = async function (namespace) { - try { - logger.debug('Reading namespace') - await k8s.readNamespace(namespace) // if namespace is available, do not create here - } catch (error) { - if (error.response.statusCode !== 404) { - logger.error(error) - throw error - } - logger.debug('Namespace not present, creating...') - await k8s.createNamespace(namespace) - } - this.config.setNamespace(namespace) -} -exports.createAlice = async function (image, port) { - const substrateArgs = [ - '--chain=local', - '--node-key', - '0000000000000000000000000000000000000000000000000000000000000001', - '--validator', - '--no-telemetry', - '--rpc-cors', - 'all', - '--alice'] - const nodeSpec = { - nodeId: 'alice', - image, - port, - args: substrateArgs - } - nodeSpec.extraInfo = { - nodeType: 'bootnode', - privateKey: '', - publicKey: '', - peerId: '12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp' - } - await this.createNode(nodeSpec) -} - -exports.createBob = async function (image, port) { - const substrateArgs = [ - '--chain=local', - '--node-key', - '0000000000000000000000000000000000000000000000000000000000000002', - '--validator', - '--bob', - '--no-telemetry', - '--rpc-cors', - 'all', - '--bootnodes', - getBootNodeUrl(this.config.bootnode)] - const nodeSpec = { - nodeId: 'bob', - image, - port, - args: substrateArgs - } - nodeSpec.extraInfo = { - nodeType: 'validator', - privateKey: '', - publicKey: '' - } - await this.createNode(nodeSpec) -} - -exports.createAliceBobNodes = async function (image, port) { - await this.createAlice(image, port) - await this.createBob(image, port) -} - -exports.createDevNode = async function (image, port) { - const substrateArgs = ['--dev', '--rpc-external', '--ws-external'] - const nodeSpec = { - nodeId: 'node-1', - image, - port, - args: substrateArgs - } - await this.createNode(nodeSpec) -} - -exports.createNode = async function (nodeSpec) { - logger.info(`Creating ${nodeSpec.nodeId} as ${nodeSpec.extraInfo ? 
nodeSpec.extraInfo.nodeType : 'FullNode'} in ${this.config.namespace}`) - await k8s.createPod(nodeSpec, this.config.namespace) - logger.debug('Polling pod status') - const pod = await pollUntil( - () => k8s.getPod(nodeSpec.nodeId, this.config.namespace) - ) - const nodeInfo = { - podName: nodeSpec.nodeId, - ip: pod.status.podIP, - port: nodeSpec.port - } - if (nodeSpec.extraInfo) { - Object.assign(nodeInfo, nodeSpec.extraInfo) - } - logger.info(`${nodeSpec.nodeId} is created`) - this.config.addNode(nodeInfo) -} - -exports.cleanup = async function (namespace) { - await k8s.deleteNamespace(namespace) - if (namespace === this.config.namespace) { - this.config.reset() - } -} - -exports.getPodInfoInConfig = function (namespace, podName) { - if (this.config.namespace === namespace && Array.isArray(this.config.nodes)) { - return this.config.nodes.find((node) => node.podName === podName) - } else { - throw Error('No pod present in the namespace in config') - } -} - -exports.startForwardServer = async function (namespace, pod, port, onReady) { - await k8s.startForwardServer(namespace, pod, port, onReady) -} diff --git a/.maintain/chaostest/src/hypervisor/deployment/index.js b/.maintain/chaostest/src/hypervisor/deployment/index.js deleted file mode 100644 index a01865b6a5438..0000000000000 --- a/.maintain/chaostest/src/hypervisor/deployment/index.js +++ /dev/null @@ -1,4 +0,0 @@ -const deployment = require('./deployment') -module.exports = function (Hypervisor) { - Object.assign(Hypervisor.prototype, deployment) -} diff --git a/.maintain/chaostest/src/hypervisor/index.js b/.maintain/chaostest/src/hypervisor/index.js deleted file mode 100644 index 607f3a33d8421..0000000000000 --- a/.maintain/chaostest/src/hypervisor/index.js +++ /dev/null @@ -1,11 +0,0 @@ -const CONFIG = require('../config')() - -function Hypervisor (config) { - this.config = config || CONFIG -} - -// Mount sub modules of the Hypervisor class -require('./deployment')(Hypervisor) -require('./chainApi')(Hypervisor) - -module.exports = Hypervisor diff --git a/.maintain/chaostest/src/hypervisor/modules/chainApi.js b/.maintain/chaostest/src/hypervisor/modules/chainApi.js deleted file mode 100644 index b2ad897d06cba..0000000000000 --- a/.maintain/chaostest/src/hypervisor/modules/chainApi.js +++ /dev/null @@ -1,18 +0,0 @@ -const { ApiPromise, WsProvider } = require('@polkadot/api') -const { HttpProvider } = require('@polkadot/rpc-provider') - -const getApi = async (url) => { - const httpProvider = new HttpProvider(url) - return httpProvider - // const api = await ApiPromise.create({ provider: wsProvider }) - // return api - // TODO: tried to use websocket provider here, but the polkadot/api version is not stable yet, using http provider for now -} - -const getChainBlockHeight = async (provider) => { - const data = await provider.send('chain_getBlock', []) - const height = parseInt(data.block.header.number, 16) - return height -} - -module.exports = { getApi, getChainBlockHeight } diff --git a/.maintain/chaostest/src/hypervisor/modules/k8s.js b/.maintain/chaostest/src/hypervisor/modules/k8s.js deleted file mode 100644 index 14f22ff5e8dff..0000000000000 --- a/.maintain/chaostest/src/hypervisor/modules/k8s.js +++ /dev/null @@ -1,113 +0,0 @@ -const k8s = require('@kubernetes/client-node') -const { isFunction } = require('../../utils') -const logger = require('../../utils/logger') - -// load k8s -const kc = new k8s.KubeConfig() -kc.loadFromDefault() - -// load k8s Apis -const k8sAppApi = kc.makeApiClient(k8s.AppsV1Api) -const k8sCoreApi = 
kc.makeApiClient(k8s.CoreV1Api) - -const createNamespace = async namespace => { - const namespaceJson = { - apiVersion: 'v1', - kind: 'Namespace', - metadata: { - name: namespace - } - } - return await k8sCoreApi.createNamespace(namespaceJson) -} - -const readNamespace = async namespace => { - return await k8sCoreApi.readNamespace(namespace) -} - -const createPod = async (nodeSpec, namespace) => { - const { label, nodeId, image, args, port } = nodeSpec - const spec = { - metadata: { - labels: { - app: label - }, - name: nodeId - }, - spec: { - containers: [ - { - image: image, - imagePullPolicy: 'Always', - name: nodeId, - ports: [{ containerPort: port }], - args: args - } - ] - } - } - return await k8sCoreApi.createNamespacedPod(namespace, spec) -} - -const getDeploymentStatus = async (deploymentName, namespace) => { - const response = await k8sAppApi.readNamespacedDeploymentStatus(deploymentName, namespace) - const status = response.response.body.status - function getAvailability (item) { - return item.type === 'Available' - } - if (status && status.conditions) { - return status.conditions.find(getAvailability) - } - return undefined -} - -const deleteNamespace = async (namespace) => { - logger.debug(`Taking down Namespace ${namespace}...`) - if (process.env.KEEP_NAMESPACE && process.env.KEEP_NAMESPACE === 1) { - return - } - return k8sCoreApi.deleteNamespace(namespace) -} - -const getNamespacedPods = async (namespace) => { - const response = await k8sCoreApi.listNamespacedPod(namespace) - return response.body.items -} - -const getPod = async (podName, namespace) => { - const pods = await getNamespacedPods(namespace) - const found = pods.find( - (pod) => !!pod.metadata && pod.metadata.name === podName && !!pod.status && pod.status.podIP - ) - if (!found) { - throw Error(`GetNode(${podName}): node is not present in the cluster`) - } - return found -} - -const startForwardServer = async (namespace, pod, port, onReady) => new Promise((resolve, reject) => { - const net = require('net') - const forward = new k8s.PortForward(kc) - - // This simple server just forwards traffic from itself to a service running in kubernetes - // -> localhost:8080 -> port-forward-tunnel -> kubernetes-pod - // This is basically equivalent to 'kubectl port-forward ...' but in Javascript. 
- const server = net.createServer((socket) => { - forward.portForward(namespace, pod, [port], socket, null, socket) - }) - // TODO: add Ws proxy server to adopt the polkadot/api - server.listen(port, '127.0.0.1', (err) => { - if (err) { - logger.error('Error starting server') - reject(err) - } - logger.info('Forwarding server started, ready to connect') - resolve() - // Optional onReady hook when server started - if (onReady && isFunction(onReady)) { - onReady() - } - }) -}) - -module.exports = { createNamespace, readNamespace, createPod, deleteNamespace, getDeploymentStatus, getPod, getNamespacedPods, startForwardServer } diff --git a/.maintain/chaostest/src/index.js b/.maintain/chaostest/src/index.js deleted file mode 100644 index 176eca6d71ba6..0000000000000 --- a/.maintain/chaostest/src/index.js +++ /dev/null @@ -1 +0,0 @@ -module.exports = require('@oclif/command') diff --git a/.maintain/chaostest/src/utils/exit.js b/.maintain/chaostest/src/utils/exit.js deleted file mode 100644 index 3cf06d290440b..0000000000000 --- a/.maintain/chaostest/src/utils/exit.js +++ /dev/null @@ -1,12 +0,0 @@ -const logger = require('../utils/logger') - -const succeedExit = function () { - process.exit(0) -} - -const errorExit = function (msg, err) { - logger.error(msg, err) - process.exit(1) -} - -module.exports = { succeedExit, errorExit } diff --git a/.maintain/chaostest/src/utils/index.js b/.maintain/chaostest/src/utils/index.js deleted file mode 100644 index b50c177215a24..0000000000000 --- a/.maintain/chaostest/src/utils/index.js +++ /dev/null @@ -1,9 +0,0 @@ -const getBootNodeUrl = (bootnode) => { - return `/dns4/${bootnode.ip}/tcp/30333/p2p/${bootnode.peerId}` -} - -const isFunction = (obj) => { - return !!(obj && obj.constructor && obj.call && obj.apply) -} - -module.exports = { getBootNodeUrl, isFunction } diff --git a/.maintain/chaostest/src/utils/logger.js b/.maintain/chaostest/src/utils/logger.js deleted file mode 100644 index e1da0d8d07f49..0000000000000 --- a/.maintain/chaostest/src/utils/logger.js +++ /dev/null @@ -1,50 +0,0 @@ -const winston = require('winston') -const fs = require('fs') -const logDir = 'log' // Or read from a configuration -const { format, transports } = winston -const env = process.env.NODE_ENV || 'development' -const util = require('util') - -if (!fs.existsSync(logDir)) { - // Create the directory if it does not exist - fs.mkdirSync(logDir) -} - -const logFormat = format.printf(info => { - info.message = util.format(info.message) - if (info.metadata && Object.keys(info.metadata).length) { - info.message = util.format(info.message, info.metadata) - } - return `${info.timestamp} ${info.level}: ${info.message}` -}) - -const logger = winston.createLogger({ - level: env === 'development' ? 'debug' : 'info', - transports: [ - new transports.Console({ - format: format.combine( - format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), - // Format the metadata object - format.metadata({ fillExcept: ['message', 'level', 'timestamp', 'label'] }), - format.colorize(), - logFormat - ) - }), - new winston.transports.File({ - level: env === 'development' ? 
'debug' : 'info', - filename: logDir + '/logs.log', - format: format.combine( - format.timestamp(), - format.json() - ), - maxsize: 1024 * 1024 * 10 // 10MB - }) - ], - exceptionHandlers: [ - new winston.transports.File({ - filename: 'log/exceptions.log' - }) - ] -}) - -module.exports = logger diff --git a/.maintain/chaostest/src/utils/wait.js b/.maintain/chaostest/src/utils/wait.js deleted file mode 100644 index 72498d1acb2a6..0000000000000 --- a/.maintain/chaostest/src/utils/wait.js +++ /dev/null @@ -1,32 +0,0 @@ -const logger = require('./logger') -/** - * Wait n milliseconds - * - * @param n - In milliseconds - */ -function waitNMilliseconds (n) { - return new Promise((resolve) => { - setTimeout(resolve, n) - }) -} - -/** - * Run a function until that function correctly resolves - * - * @param fn - The function to run - */ -async function pollUntil (fn) { - try { - const result = await fn() - - return result - } catch (_error) { - logger.error('Error polling', _error) - logger.debug('awaiting...') - await waitNMilliseconds(5000) // FIXME We can add exponential delay here - - return pollUntil(fn) - } -} - -module.exports = { pollUntil, waitNMilliseconds } diff --git a/.maintain/gitlab/lib.sh b/.maintain/common/lib.sh similarity index 89% rename from .maintain/gitlab/lib.sh rename to .maintain/common/lib.sh index 33477b52f5891..1d4be0ecc7296 100755 --- a/.maintain/gitlab/lib.sh +++ b/.maintain/common/lib.sh @@ -66,11 +66,17 @@ has_label(){ repo="$1" pr_id="$2" label="$3" + + # These will exist if the function is called in Gitlab. + # If the function's called in Github, we should have GITHUB_ACCESS_TOKEN set + # already. if [ -n "$GITHUB_RELEASE_TOKEN" ]; then - out=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$api_base/$repo/pulls/$pr_id") - else - out=$(curl -H "Authorization: token $GITHUB_PR_TOKEN" -s "$api_base/$repo/pulls/$pr_id") + GITHUB_TOKEN="$GITHUB_RELEASE_TOKEN" + elif [ -n "$GITHUB_PR_TOKEN" ]; then + GITHUB_TOKEN="$GITHUB_PR_TOKEN" fi + + out=$(curl -H "Authorization: token $GITHUB_TOKEN" -s "$api_base/$repo/pulls/$pr_id") [ -n "$(echo "$out" | tr -d '\r\n' | jq ".labels | .[] | select(.name==\"$label\")")" ] } diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index 146cc4cfcbdb5..2253452e203da 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for {{pallet}} +//! Autogenerated weights for {{pallet}} +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}}, LOW RANGE: {{cmd.lowest_range_values}}, HIGH RANGE: {{cmd.highest_range_values}} //! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} @@ -44,7 +45,7 @@ pub trait WeightInfo { /// Weights for {{pallet}} using the Substrate node and recommended hardware. 
pub struct SubstrateWeight<T>(PhantomData<T>); -impl<T: frame_system::Trait> WeightInfo for SubstrateWeight<T> { +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { {{~#each benchmarks as |benchmark|}} fn {{benchmark.name~}} ( @@ -53,6 +54,7 @@ impl<T: frame_system::Trait> WeightInfo for SubstrateWeight<T> { ) -> Weight { ({{underscore benchmark.base_weight}} as Weight) {{~#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) {{~/each}} {{~#if (ne benchmark.base_reads "0")}} @@ -81,6 +83,7 @@ impl WeightInfo for () { ) -> Weight { ({{underscore benchmark.base_weight}} as Weight) {{~#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) {{~/each}} {{~#if (ne benchmark.base_reads "0")}} diff --git a/.maintain/gitlab/check_labels.sh b/.maintain/github/check_labels.sh similarity index 76% rename from .maintain/gitlab/check_labels.sh rename to .maintain/github/check_labels.sh index 5ab099b38291c..75190db6683fa 100755 --- a/.maintain/gitlab/check_labels.sh +++ b/.maintain/github/check_labels.sh @@ -1,11 +1,14 @@ #!/usr/bin/env bash -#shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" +#shellcheck source=../common/lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh" + +repo="$GITHUB_REPOSITORY" +pr="$GITHUB_PR" ensure_labels() { for label in "$@"; do - if has_label 'paritytech/substrate' "$CI_COMMIT_BRANCH" "$label"; then + if has_label "$repo" "$pr" "$label"; then return 0 fi done @@ -27,7 +30,7 @@ criticality_labels=( 'C9-critical' ) -echo "[+] Checking release notes (B) labels for $CI_COMMIT_BRANCH" +echo "[+] Checking release notes (B) labels" if ensure_labels "${releasenotes_labels[@]}"; then echo "[+] Release notes label detected. All is well." else @@ -35,7 +38,7 @@ else exit 1 fi -echo "[+] Checking release criticality (C) labels for $CI_COMMIT_BRANCH" +echo "[+] Checking release criticality (C) labels" if ensure_labels "${criticality_labels[@]}"; then echo "[+] Release criticality label detected. All is well." else diff --git a/.maintain/gitlab/check_line_width.sh b/.maintain/gitlab/check_line_width.sh index 611d3ae2681e2..ebab3013e4b48 100755 --- a/.maintain/gitlab/check_line_width.sh +++ b/.maintain/gitlab/check_line_width.sh @@ -25,7 +25,7 @@ do echo "| error!" echo "| Lines must not be longer than ${LINE_WIDTH} characters." echo "| " - echo "| see more https://wiki.parity.io/Substrate-Style-Guide" + echo "| see more https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md" echo "|" FAIL="true" fi @@ -41,7 +41,7 @@ do echo "| warning!" echo "| Lines should be longer than ${GOOD_LINE_WIDTH} characters only in exceptional circumstances!"
echo "| " - echo "| see more https://wiki.parity.io/Substrate-Style-Guide" + echo "| see more https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md" echo "|" fi echo "| file: ${file}" diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index 219af5001b053..e5b308d038e21 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -9,6 +9,7 @@ # polkadot companion: paritytech/polkadot#567 # +set -e github_api_substrate_pull_url="https://api.github.com/repos/paritytech/substrate/pulls" # use github api v3 in order to access the data without authentication @@ -40,10 +41,9 @@ EOT git config --global user.name 'CI system' git config --global user.email '<>' -cargo install -f --version 0.2.0 diener - # Merge master into our branch before building Polkadot to make sure we don't miss # any commits that are required by Polkadot. +git fetch --depth 100 origin git merge origin/master # Clone the current Polkadot master branch into ./polkadot. @@ -51,6 +51,8 @@ git merge origin/master # ancestor for successfully performing merges below. git clone --depth 20 https://github.com/paritytech/polkadot.git +cargo install -f diener + cd polkadot # either it's a pull request then check for a companion otherwise use @@ -67,8 +69,8 @@ then pr_body="$(sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p' "${pr_data_file}")" pr_companion="$(echo "${pr_body}" | sed -n -r \ - -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ - -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ + -e 's;^.*[Cc]ompanion.*paritytech/polkadot#([0-9]+).*$;\1;p' \ + -e 's;^.*[Cc]ompanion.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" if [ "${pr_companion}" ] @@ -85,10 +87,12 @@ else boldprint "this is not a pull request - building polkadot:master" fi -cd .. -$CARGO_HOME/bin/diener --substrate --branch $CI_COMMIT_REF_NAME --git https://gitlab.parity.io/parity/substrate.git --path polkadot -cd polkadot +# Patch all Substrate crates in Polkadot +diener patch --crates-to-patch ../ --substrate # Test Polkadot pr or master branch with this Substrate commit. 
cargo update -p sp-io -time cargo test --all --release --verbose +time cargo test --all --release --verbose --features=real-overseer + +cd parachain/test-parachains/adder/collator/ +time cargo test --release --verbose --locked --features=real-overseer diff --git a/.maintain/gitlab/check_polkadot_companion_status.sh b/.maintain/gitlab/check_polkadot_companion_status.sh index 35c2983886f46..4714baf54fb2f 100755 --- a/.maintain/gitlab/check_polkadot_companion_status.sh +++ b/.maintain/gitlab/check_polkadot_companion_status.sh @@ -43,8 +43,8 @@ pr_body="$(curl -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_C # get companion if explicitly specified pr_companion="$(echo "${pr_body}" | sed -n -r \ - -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ - -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ + -e 's;^.*[Cc]ompanion.*paritytech/polkadot#([0-9]+).*$;\1;p' \ + -e 's;^.*[Cc]ompanion.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" if [ -z "${pr_companion}" ] diff --git a/.maintain/gitlab/check_signed.sh b/.maintain/gitlab/check_signed.sh index 7c4cc47baba38..20d47c2304767 100755 --- a/.maintain/gitlab/check_signed.sh +++ b/.maintain/gitlab/check_signed.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash -# shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" +# shellcheck source=../common/lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh" version="$CI_COMMIT_TAG" diff --git a/.maintain/gitlab/generate_changelog.sh b/.maintain/gitlab/generate_changelog.sh index c13871f50ee49..a1190f2bf0bc6 100755 --- a/.maintain/gitlab/generate_changelog.sh +++ b/.maintain/gitlab/generate_changelog.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash -# shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" +# shellcheck source=../common/lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh" version="$2" last_version="$1" diff --git a/.maintain/gitlab/publish_draft_release.sh b/.maintain/gitlab/publish_draft_release.sh index c5813718a69f2..36ee0d63e78f9 100755 --- a/.maintain/gitlab/publish_draft_release.sh +++ b/.maintain/gitlab/publish_draft_release.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash -# shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" +# shellcheck source=../common/lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh" version="$CI_COMMIT_TAG" diff --git a/.maintain/sentry-node/docker-compose.yml b/.maintain/local-docker-test-network/docker-compose.yml similarity index 61% rename from .maintain/sentry-node/docker-compose.yml rename to .maintain/local-docker-test-network/docker-compose.yml index 2af9449853c77..53e2a2913f38b 100644 --- a/.maintain/sentry-node/docker-compose.yml +++ b/.maintain/local-docker-test-network/docker-compose.yml @@ -1,24 +1,26 @@ -# Docker compose file to simulate a sentry node setup. +# Docker compose file to start a multi node local test network. # +# # Nodes # -# Setup: +# - Validator node A +# - Validator node B +# - Light client C # -# Validator A is not supposed to be connected to the public internet. Instead it -# connects to a sentry node (sentry-a) which connects to the public internet. -# Validator B can reach validator A via sentry node A and vice versa. 
+# # Auxiliary nodes # +# - Prometheus monitoring each node. +# - Grafana pointed at the Prometheus node, configured with all dashboards. # -# Usage: +# # Usage # # 1. Build `target/release/substrate` binary: `cargo build --release` -# -# 2. Start networks and containers: `sudo docker-compose -f .maintain/sentry-node/docker-compose.yml up` -# -# 3. Reach: -# - polkadot/apps on localhost:3000 +# 2. Start networks and containers: +# `sudo docker-compose -f .maintain/sentry-node/docker-compose.yml up` +# 3. Connect to nodes: # - validator-a: localhost:9944 # - validator-b: localhost:9945 -# - sentry-a: localhost:9946 +# - light-c: localhost:9946 +# - via polkadot.js/apps: https://polkadot.js.org/apps/?rpc=ws%3A%2F%2Flocalhost%3A#/explorer # - grafana: localhost:3001 # - prometheus: localhost:9090 @@ -34,9 +36,8 @@ services: - ../../target/release/substrate:/usr/local/bin/substrate image: parity/substrate networks: - - network-a + - internet command: - # Local node id: QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR - "--node-key" - "0000000000000000000000000000000000000000000000000000000000000001" - "--base-path" @@ -46,48 +47,38 @@ services: - "30333" - "--validator" - "--alice" - - "--sentry-nodes" - - "/dns/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi" - - "--reserved-nodes" - - "/dns/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi" + - "--bootnodes" + - "/dns/validator-b/tcp/30333/p2p/12D3KooWHdiAxVd8uMQR1hGWXccidmfCwLqcMpGwR6QcTP6QRMuD" # Not only bind to localhost. - "--unsafe-ws-external" - "--unsafe-rpc-external" - # - "--log" - # - "sub-libp2p=trace" - # - "--log" - # - "afg=trace" - "--log" - - "sub-authority-discovery=trace" + - "sub-libp2p=trace" - "--no-telemetry" - "--rpc-cors" - "all" - "--prometheus-external" - sentry-a: + validator-b: image: parity/substrate ports: - - "9946:9944" + - "9945:9944" volumes: - ../../target/release/substrate:/usr/local/bin/substrate networks: - - network-a - internet command: - # Local node id: QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi - "--node-key" - - "0000000000000000000000000000000000000000000000000000000000000003" + - "0000000000000000000000000000000000000000000000000000000000000002" - "--base-path" - - "/tmp/sentry" + - "/tmp/bob" - "--chain=local" - "--port" - "30333" - - "--sentry" - - "/dns/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR" - - "--reserved-nodes" - - "/dns/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR" + - "--validator" + - "--bob" - "--bootnodes" - - "/dns/validator-b/tcp/30333/p2p/QmSVnNf9HwVMT1Y4cK1P6aoJcEZjmoTXpjKBmAABLMnZEk" + - "/dns/validator-a/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp" - "--no-telemetry" - "--rpc-cors" - "all" @@ -95,32 +86,30 @@ services: - "--unsafe-ws-external" - "--unsafe-rpc-external" - "--log" - - "sub-authority-discovery=trace" + - "sub-libp2p=trace" - "--prometheus-external" - validator-b: + light-c: image: parity/substrate ports: - - "9945:9944" + - "9946:9944" volumes: - ../../target/release/substrate:/usr/local/bin/substrate networks: - internet command: - # Local node id: QmSVnNf9HwVMT1Y4cK1P6aoJcEZjmoTXpjKBmAABLMnZEk - "--node-key" - - "0000000000000000000000000000000000000000000000000000000000000002" + - "0000000000000000000000000000000000000000000000000000000000000003" - "--base-path" - - "/tmp/bob" + - "/tmp/light" - "--chain=local" - "--port" - "30333" - - "--validator" - - "--bob" + - "--light" - "--bootnodes" - - 
"/dns/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR" + - "/dns/validator-a/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp" - "--bootnodes" - - "/dns/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi" + - "/dns/validator-b/tcp/30333/p2p/12D3KooWHdiAxVd8uMQR1hGWXccidmfCwLqcMpGwR6QcTP6QRMuD" - "--no-telemetry" - "--rpc-cors" - "all" @@ -128,20 +117,19 @@ services: - "--unsafe-ws-external" - "--unsafe-rpc-external" - "--log" - - "sub-authority-discovery=trace" + - "sub-libp2p=trace" - "--prometheus-external" prometheus: image: prom/prometheus networks: - - network-a - internet ports: - "9090:9090" links: - validator-a:validator-a - - sentry-a:sentry-a - validator-b:validator-b + - light-c:light-c volumes: - ./prometheus/:/etc/prometheus/ restart: always @@ -152,7 +140,6 @@ services: depends_on: - prometheus networks: - - network-a - internet ports: - 3001:3000 diff --git a/.maintain/sentry-node/grafana/provisioning/dashboards/dashboards.yml b/.maintain/local-docker-test-network/grafana/provisioning/dashboards/dashboards.yml similarity index 100% rename from .maintain/sentry-node/grafana/provisioning/dashboards/dashboards.yml rename to .maintain/local-docker-test-network/grafana/provisioning/dashboards/dashboards.yml diff --git a/.maintain/sentry-node/grafana/provisioning/datasources/datasource.yml b/.maintain/local-docker-test-network/grafana/provisioning/datasources/datasource.yml similarity index 100% rename from .maintain/sentry-node/grafana/provisioning/datasources/datasource.yml rename to .maintain/local-docker-test-network/grafana/provisioning/datasources/datasource.yml diff --git a/.maintain/sentry-node/prometheus/prometheus.yml b/.maintain/local-docker-test-network/prometheus/prometheus.yml similarity index 89% rename from .maintain/sentry-node/prometheus/prometheus.yml rename to .maintain/local-docker-test-network/prometheus/prometheus.yml index 547d4bea57ae5..f8acb7c0b8ccd 100644 --- a/.maintain/sentry-node/prometheus/prometheus.yml +++ b/.maintain/local-docker-test-network/prometheus/prometheus.yml @@ -7,9 +7,9 @@ scrape_configs: - targets: ['validator-a:9615'] labels: network: dev - - targets: ['sentry-a:9615'] + - targets: ['validator-b:9615'] labels: network: dev - - targets: ['validator-b:9615'] + - targets: ['light-c:9615'] labels: network: dev diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml index 16a27c06d3e05..cf00d7e2b90f1 100644 --- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml +++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml @@ -127,8 +127,8 @@ groups: ############################################################################## - alert: ContinuousTaskEnded - expr: '(polkadot_tasks_spawned_total == 1) - on(instance, task_name) - (polkadot_tasks_ended_total == 1)' + expr: '(polkadot_tasks_spawned_total{task_name != "basic-authorship-proposer"} == 1) + - on(instance, task_name) (polkadot_tasks_ended_total == 1)' for: 5m labels: severity: warning @@ -147,3 +147,28 @@ groups: message: 'Authority discovery on node {{ $labels.instance }} fails to process more than 50 % of the values found on the DHT for more than 2 hours.' 
+ + - alert: UnboundedChannelPersistentlyLarge + expr: '( + (polkadot_unbounded_channel_len{action = "send"} - + ignoring(action) polkadot_unbounded_channel_len{action = "received"}) + or on(instance) polkadot_unbounded_channel_len{action = "send"} + ) >= 200' + for: 5m + labels: + severity: warning + annotations: + message: 'Channel {{ $labels.entity }} on node {{ $labels.instance }} contains + more than 200 items for more than 5 minutes. Node might be frozen.' + + - alert: UnboundedChannelVeryLarge + expr: '( + (polkadot_unbounded_channel_len{action = "send"} - + ignoring(action) polkadot_unbounded_channel_len{action = "received"}) + or on(instance) polkadot_unbounded_channel_len{action = "send"} + ) > 15000' + labels: + severity: warning + annotations: + message: 'Channel {{ $labels.entity }} on node {{ $labels.instance }} contains more than + 15000 items.' diff --git a/.maintain/monitoring/grafana-dashboards/README_dashboard.md b/.maintain/monitoring/grafana-dashboards/README_dashboard.md index 37bebc6f8eaae..e00b89449cfaf 100644 --- a/.maintain/monitoring/grafana-dashboards/README_dashboard.md +++ b/.maintain/monitoring/grafana-dashboards/README_dashboard.md @@ -5,10 +5,3 @@ Shared templated Grafana dashboards. To import the dashboards follow the [Grafana documentation](https://grafana.com/docs/grafana/latest/reference/export_import/). You can see an example setup [here](../../../.maintain/sentry-node). - -#### Required labels on Prometheus metrics - -- `instance` referring to a single scrape target (see [Prometheus docs for - details](https://prometheus.io/docs/concepts/jobs_instances/)). - -- `network` referring to the Blockchain network e.g. Kusama. diff --git a/.maintain/monitoring/grafana-dashboards/substrate-dashboard.json b/.maintain/monitoring/grafana-dashboards/substrate-dashboard.json deleted file mode 100644 index 629b22617b22a..0000000000000 --- a/.maintain/monitoring/grafana-dashboards/substrate-dashboard.json +++ /dev/null @@ -1,1650 +0,0 @@ -{ - "annotations": { - "list": [ - { - "$$hashKey": "object:15", - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "iteration": 1586424254170, - "links": [ - { - "icon": "external link", - "tags": [], - "targetBlank": true, - "title": "With love from ColmenaLabs", - "tooltip": "", - "type": "link", - "url": "https://colmenalabs.org" - }, - { - "icon": "external link", - "tags": [], - "targetBlank": true, - "title": "Polkastats.io", - "tooltip": "", - "type": "link", - "url": "https://polkastats.io" - } - ], - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"rate([[metric_namespace]]_block_height{status=\"finalized\",instance=\"[[instance]]\",network=\"[[network]]\"}[10m])/rate([[metric_namespace]]_block_height{status=\"finalized\",instance=\"[[instance]]\",network=\"[[network]]\"}[1m])", - "intervalFactor": 1, - "legendFormat": "rate[10m] / rate[1m]", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Relative Block Production Speed", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 0 - }, - "hiddenSeries": false, - "id": 15, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sub_libp2p_peers_count{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Peers count", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 17, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pluginVersion": "6.4.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar([[metric_namespace]]_block_height{status=\"best\",instance=\"[[instance]]\",network=\"[[network]]\"})-scalar([[metric_namespace]]_block_height{status=\"finalized\",instance=\"[[instance]]\",network=\"[[network]]\"})", - "intervalFactor": 2, - "legendFormat": "[[hostname]]", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - 
"title": "Diff -> ( Best Block - Finalized )", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 0 - }, - "hiddenSeries": false, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate([[metric_namespace]]_block_height{status=\"finalized\",instance=\"[[instance]]\",network=\"[[network]]\"}[10m])*60", - "intervalFactor": 10, - "legendFormat": "{{instance}} Blocks / minute", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Block rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 6 - }, - "hiddenSeries": false, - "id": 10, - "interval": "", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "increase([[metric_namespace]]_block_height{instance=\"[[instance]]\",network=\"[[network]]\",status=~\"finalized|sync_target\"}[1m])", - "intervalFactor": 5, - "legendFormat": "{{status}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Blocks Av per min", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - 
"show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 6 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_block_height{instance=\"[[instance]]\",network=\"[[network]]\",status=~\"finalized|sync_target\"}", - "legendFormat": "{{instance}} {{status}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Block Finalized", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 6 - }, - "hiddenSeries": false, - "id": 13, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_block_height{status=\"best\",instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Block height", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 6 - }, - "hiddenSeries": false, - "id": 20, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": 
false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "data": "", - "expr": "[[metric_namespace]]_ready_transactions_number{instance=\"[[instance]]\",network=\"[[network]]\"}", - "hide": false, - "legendFormat": "{{instance}}", - "refId": "A", - "target": "txcount", - "type": "timeseries" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "TXs Count", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 12 - }, - "hiddenSeries": false, - "id": 23, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_active{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} active", - "refId": "A" - }, - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_failed{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} failed", - "refId": "B" - }, - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_importing{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} importing", - "refId": "C" - }, - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_pending{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} pending", - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Sync Proof", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 12 - }, - "hiddenSeries": false, - "id": 22, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - 
"points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sync_extra_justifications_active{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} active", - "refId": "A" - }, - { - "expr": "[[metric_namespace]]_sync_extra_justifications_failed{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} failed", - "refId": "B" - }, - { - "expr": "[[metric_namespace]]_sync_extra_justifications_importing{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} importing", - "refId": "C" - }, - { - "expr": "[[metric_namespace]]_sync_extra_justifications_pending{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} pending", - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Sync justifications", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 12 - }, - "hiddenSeries": false, - "id": 24, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sub_libp2p_connections{instance=\"[[instance]]\",network=\"[[network]]\"}", - "hide": false, - "legendFormat": "{{instance}} connections", - "refId": "A" - }, - { - "expr": "[[metric_namespace]]_sub_libp2p_is_major_syncing{instance=\"[[instance]]\",network=\"[[network]]\"}", - "hide": false, - "legendFormat": "{{instance}} syncing", - "refId": "B" - }, - { - "expr": "[[metric_namespace]]_sub_libp2p_kbuckets_num_nodes{instance=\"[[instance]]\",network=\"[[network]]\"}", - "hide": false, - "legendFormat": "{{instance}} num_nodes", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "sub_libp2p", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - 
"h": 6, - "w": 6, - "x": 18, - "y": 12 - }, - "hiddenSeries": false, - "id": 26, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sub_libp2p_notifications_total{instance=\"[[instance]]\",network=\"[[network]]\",protocol=\"FRNK\",direction=\"in\"}", - "hide": false, - "legendFormat": "{{instance}} FRNK in", - "refId": "A" - }, - { - "expr": "[[metric_namespace]]_sub_libp2p_notifications_total{instance=\"[[instance]]\",network=\"[[network]]\",protocol=\"FRNK\",direction=\"out\"}", - "hide": false, - "legendFormat": "{{instance}} FRNK out", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "libp2p_notifications", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 18 - }, - "hiddenSeries": false, - "id": 28, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_cpu_usage_percentage{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "CPU usage %", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 18 - }, - "hiddenSeries": false, - "id": 27, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - 
"pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_memory_usage_bytes{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} Mem bytes", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 2, - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 18 - }, - "hiddenSeries": false, - "id": 25, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sub_libp2p_network_per_sec_bytes", - "hide": false, - "legendFormat": "{{instance}}", - "refId": "A" - }, - { - "expr": "[[metric_namespace]]_sub_libp2p_notifications_total", - "hide": true, - "legendFormat": "{{instance}}", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "libp2p_network_per_sec_bytes", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 18 - }, - "hiddenSeries": false, - "id": 29, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pluginVersion": "6.5.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sub_libp2p_notifications_total{instance=\"[[instance]]\",network=\"[[network]]\",protocol=\"dot1\",direction=\"in\"}", - "hide": false, - "legendFormat": "{{instance}} dot1 in", - "refId": "B" - }, - { - "expr": 
"[[metric_namespace]]_sub_libp2p_notifications_total{instance=\"[[instance]]\",network=\"[[network]]\",protocol=\"dot2\",direction=\"in\"}", - "hide": false, - "legendFormat": "{{instance}} dot2 in", - "refId": "C" - }, - { - "expr": "[[metric_namespace]]_sub_libp2p_notifications_total{instance=\"[[instance]]\",network=\"[[network]]\",protocol=\"dot2\",direction=\"out\"}", - "hide": false, - "legendFormat": "{{instance}} dot2 out", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "libp2p_notifications", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 22, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "allValue": null, - "current": { - "selected": true, - "text": "substrate", - "value": "substrate" - }, - "hide": 0, - "includeAll": false, - "label": null, - "multi": false, - "name": "metric_namespace", - "options": [ - { - "selected": true, - "text": "substrate", - "value": "substrate" - }, - { - "selected": false, - "text": "polkadot", - "value": "polkadot" - } - ], - "query": "substrate, polkadot", - "skipUrlSync": false, - "type": "custom" - }, - { - "allValue": null, - "current": { - "selected": true, - "text": "dev", - "value": "dev" - }, - "datasource": "Prometheus", - "definition": "label_values(network)", - "hide": 0, - "includeAll": false, - "index": -1, - "label": null, - "multi": false, - "name": "network", - "options": [], - "query": "label_values(network)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "selected": false, - "text": "validator-a:9615", - "value": "validator-a:9615" - }, - "datasource": "Prometheus", - "definition": "label_values(instance)", - "hide": 0, - "includeAll": false, - "index": -1, - "label": null, - "multi": false, - "name": "instance", - "options": [], - "query": "label_values(instance)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Substrate Dashboard", - "uid": "ColmenaLabs", - "variables": { - "list": [] - }, - "version": 2 -} diff --git a/.maintain/monitoring/grafana-dashboards/substrate-networking.json b/.maintain/monitoring/grafana-dashboards/substrate-networking.json index dfc143005493d..d2abfd1cb864e 100644 --- a/.maintain/monitoring/grafana-dashboards/substrate-networking.json +++ b/.maintain/monitoring/grafana-dashboards/substrate-networking.json @@ -1,13 +1,5 @@ { "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - }, { "name": "VAR_METRIC_NAMESPACE", 
"type": "constant", @@ -17,11 +9,17 @@ } ], "__requires": [ + { + "type": "panel", + "id": "dashlist", + "name": "Dashboard list", + "version": "" + }, { "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "6.7.3" + "version": "7.3.6" }, { "type": "panel", @@ -29,17 +27,17 @@ "name": "Graph", "version": "" }, - { - "type": "panel", - "id": "heatmap", - "name": "Heatmap", - "version": "" - }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "1.0.0" + }, + { + "type": "panel", + "id": "text", + "name": "Text", + "version": "" } ], "annotations": { @@ -76,78 +74,162 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1600780210197, + "iteration": 1610462565248, "links": [], "panels": [ { "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "gridPos": { "h": 1, - "w": 24, + "w": 12, "x": 0, "y": 0 }, - "id": 167, - "title": "Sync", + "id": 308, + "options": { + "content": "", + "mode": "markdown" + }, + "pluginVersion": "7.3.6", + "repeat": "nodename", + "timeFrom": null, + "timeShift": null, + "title": "$nodename", + "type": "text" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 27, + "panels": [], + "title": "Transport", "type": "row" }, { "aliasColors": {}, - "bars": false, + "bars": true, "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 5, - "w": 24, + "h": 6, + "w": 12, "x": 0, - "y": 1 + "y": 2 }, "hiddenSeries": false, - "id": 101, + "id": 19, + "interval": "1m", "legend": { + "alignAsTable": false, "avg": false, "current": false, + "hideEmpty": false, "max": false, "min": false, - "show": false, + "rightSide": false, + "show": true, "total": false, "values": false }, - "lines": true, + "lines": false, "linewidth": 1, - "nullPointMode": "connected", + "maxPerRow": 12, + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [], + "repeat": "nodename", + "repeatDirection": "h", + "seriesOverrides": [ + { + "alias": "established (in)", + "color": "#37872D" + }, + { + "alias": "established (out)", + "color": "#C4162A" + }, + { + "alias": "pending (out)", + "color": "#FF7383" + }, + { + "alias": "closed-recently", + "color": "#FADE2A", + "steppedLine": true + } + ], "spaceLength": 10, - "stack": false, - "steppedLine": true, + "stack": true, + "steppedLine": false, "targets": [ { - "expr": "1 - (${metric_namespace}_sub_libp2p_peerset_num_requested{instance=~\"${nodename}\"} - ${metric_namespace}_sub_libp2p_peers_count{instance=~\"${nodename}\"}) / ${metric_namespace}_sub_libp2p_peerset_num_requested{instance=~\"${nodename}\"}", + "expr": "(\n sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"in\", instance=~\"${nodename}\"}) by (instance) -\n sum(${metric_namespace}_sub_libp2p_connections_closed_total{direction=\"in\", instance=~\"${nodename}\"}) by (instance)\n)\n\n# Because `closed_total` can be null, this serves as fallback\nor on(instance) sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"in\", instance=~\"${nodename}\"}) by (instance)", + "format": "time_series", + "hide": 
false, "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "established (in)", "refId": "A" + }, + { + "expr": "(\n sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"out\", instance=~\"${nodename}\"}) by (instance) -\n sum(${metric_namespace}_sub_libp2p_connections_closed_total{direction=\"out\", instance=~\"${nodename}\"}) by (instance)\n)\n\n# Because `closed_total` can be null, this serves as fallback\nor on(instance) sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"out\", instance=~\"${nodename}\"}) by (instance)", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "established (out)", + "refId": "C" + }, + { + "expr": "sum by (instance) (${metric_namespace}_sub_libp2p_pending_connections{instance=~\"${nodename}\"})", + "hide": false, + "interval": "", + "legendFormat": "pending (out)", + "refId": "B" + }, + { + "expr": "sum(rate(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "closed-per-sec", + "refId": "D" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of peer slots filled", + "title": "Average transport-level (TCP, QUIC, ...) connections", "tooltip": { "shared": true, - "sort": 1, + "sort": 0, "value_type": "individual" }, "type": "graph", @@ -160,10 +242,10 @@ }, "yaxes": [ { - "format": "percentunit", - "label": null, + "format": "short", + "label": "Connections", "logBase": 1, - "max": "1.0", + "max": null, "min": null, "show": true }, @@ -181,63 +263,65 @@ "alignLevel": null } }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 6 - }, - "id": 29, - "panels": [], - "repeat": "request_protocol", - "title": "Requests (${request_protocol})", - "type": "row" - }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 4, + "h": 6, "w": 12, "x": 0, - "y": 7 + "y": 8 }, "hiddenSeries": false, - "id": 148, + "id": 189, + "interval": "1m", "legend": { + "alignAsTable": false, "avg": false, "current": false, + "hideEmpty": false, "max": false, "min": false, - "show": false, + "rightSide": false, + "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "maxPerRow": 12, + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", + "repeatDirection": "h", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "irate(${metric_namespace}_sub_libp2p_requests_out_started_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])", + "expr": "1 - \n\navg(\n ${metric_namespace}_sub_libp2p_distinct_peers_connections_opened_total{instance=~\"${nodename}\"} - ${metric_namespace}_sub_libp2p_distinct_peers_connections_closed_total{instance=~\"${nodename}\"}\n) by (instance)\n\n/\n\navg(\r\n sum(${metric_namespace}_sub_libp2p_connections_opened_total{instance=~\"${nodename}\"}) by (instance) - sum(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}) by (instance)\r\n) by (instance)", + "format": "time_series", + "hide": false, 
"interval": "", "legendFormat": "{{instance}}", "refId": "A" @@ -247,7 +331,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Requests emitted per second", + "title": "Percentage of peers for which we have more than one connection open", "tooltip": { "shared": true, "sort": 2, @@ -263,8 +347,8 @@ }, "yaxes": [ { - "format": "reqps", - "label": null, + "format": "percentunit", + "label": "", "logBase": 1, "max": null, "min": null, @@ -276,7 +360,7 @@ "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -286,20 +370,28 @@ }, { "aliasColors": {}, - "bars": false, + "bars": true, "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 4, + "h": 6, "w": 12, - "x": 12, - "y": 7 + "x": 0, + "y": 14 }, "hiddenSeries": false, - "id": 151, + "id": 39, + "interval": "1m", "legend": { "avg": false, "current": false, @@ -309,33 +401,47 @@ "total": false, "values": false }, - "lines": true, + "lines": false, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [], + "repeat": "nodename", + "seriesOverrides": [ + { + "alias": "/.*/", + "color": "#FF780A" + } + ], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": false, "targets": [ { - "expr": "irate(${metric_namespace}_sub_libp2p_requests_in_success_total_count{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])", + "expr": "rate(${metric_namespace}_sub_libp2p_incoming_connections_handshake_errors_total{instance=~\"${nodename}\"}[$__interval])", + "hide": false, "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{reason}}", "refId": "A" + }, + { + "expr": "rate(${metric_namespace}_sub_libp2p_listeners_errors_total{instance=~\"${nodename}\"}[$__interval])", + "interval": "", + "legendFormat": "pre-handshake", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Requests served per second", + "title": "Number of incoming connection errors", "tooltip": { "shared": true, "sort": 2, @@ -351,8 +457,8 @@ }, "yaxes": [ { - "format": "reqps", - "label": null, + "format": "short", + "label": "Errors", "logBase": 1, "max": null, "min": null, @@ -364,7 +470,7 @@ "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -378,56 +484,64 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "description": "Each bucket represent a certain number of nodes using a certain bandwidth range.", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 4, + "h": 6, "w": 12, "x": 0, - "y": 11 + "y": 20 }, "hiddenSeries": false, - "id": 256, + "id": 4, "legend": { "avg": false, "current": false, "max": false, "min": false, - "show": false, + "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": 
false, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.5, sum(rate(${metric_namespace}_sub_libp2p_requests_out_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le)) > 0", - "instant": false, + "expr": "rate(${metric_namespace}_sub_libp2p_network_bytes_total{instance=~\"${nodename}\"}[5m])", "interval": "", - "legendFormat": "{{instance}}", - "refId": "A" + "legendFormat": "{{direction}}", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Median request answer time", + "title": "Network bandwidth - # bytes per second", "tooltip": { "shared": true, - "sort": 2, + "sort": 0, "value_type": "individual" }, "type": "graph", @@ -440,7 +554,7 @@ }, "yaxes": [ { - "format": "s", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -463,48 +577,63 @@ }, { "aliasColors": {}, - "bars": false, + "bars": true, "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 4, + "h": 7, "w": 12, - "x": 12, - "y": 11 + "x": 0, + "y": 26 }, "hiddenSeries": false, - "id": 258, + "id": 81, + "interval": "1m", "legend": { + "alignAsTable": false, "avg": false, "current": false, + "hideEmpty": true, + "hideZero": true, "max": false, "min": false, - "show": false, + "rightSide": false, + "show": true, "total": false, "values": false }, - "lines": true, + "lines": false, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", + "repeatDirection": "h", "seriesOverrides": [], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.5, sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le))", + "expr": "rate(${metric_namespace}_sub_libp2p_pending_connections_errors_total{instance=~\"${nodename}\"}[$__interval])", "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{reason}}", "refId": "A" } ], @@ -512,7 +641,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Median request serving time", + "title": "Dialing attempt errors", "tooltip": { "shared": true, "sort": 2, @@ -528,7 +657,7 @@ }, "yaxes": [ { - "format": "s", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -541,7 +670,7 @@ "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -551,49 +680,60 @@ }, { "aliasColors": {}, - "bars": false, + "bars": true, "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 4, + "h": 7, "w": 12, "x": 0, - "y": 15 + "y": 33 }, "hiddenSeries": false, - "id": 257, + "id": 46, + "interval": "1m", "legend": { "avg": false, "current": false, "max": false, "min": false, - "show": false, + "show": true, "total": false, "values": false }, - "lines": true, + "lines": false, "linewidth": 1, + "maxPerRow": 12, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": 
false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", + "repeatDirection": "h", "seriesOverrides": [], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_out_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le)) > 0", - "instant": false, + "expr": "rate(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}[$__interval])", "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{reason}} ({{direction}})", "refId": "A" } ], @@ -601,7 +741,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "99th percentile request answer time", + "title": "Disconnects", "tooltip": { "shared": true, "sort": 2, @@ -617,8 +757,9 @@ }, "yaxes": [ { - "format": "s", - "label": null, + "decimals": null, + "format": "short", + "label": "Disconnects", "logBase": 1, "max": null, "min": null, @@ -630,7 +771,7 @@ "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -638,22 +779,44 @@ "alignLevel": null } }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 167, + "panels": [], + "repeat": null, + "title": "Sync", + "type": "row" + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 4, + "h": 5, "w": 12, - "x": 12, - "y": 15 + "x": 0, + "y": 41 }, "hiddenSeries": false, - "id": 259, + "id": 101, "legend": { "avg": false, "current": false, @@ -665,34 +828,43 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", + "repeatDirection": "h", "seriesOverrides": [], "spaceLength": 10, "stack": false, - "steppedLine": false, + "steppedLine": true, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le))", + "expr": "${metric_namespace}_sub_libp2p_peerset_num_requested{instance=~\"${nodename}\"}", "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "peers-requested", "refId": "A" + }, + { + "expr": "polkadot_sub_libp2p_peers_count{instance=~\"${nodename}.*\"}", + "interval": "", + "legendFormat": "peers-count", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "99th percentile request serving time", + "title": "Number of peer slots filled", "tooltip": { - "shared": false, - "sort": 2, + "shared": true, + "sort": 1, "value_type": "individual" }, "type": "graph", @@ -705,11 +877,11 @@ }, "yaxes": [ { - "format": "s", + "format": "none", "label": null, "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { @@ -718,7 +890,7 @@ "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -726,22 +898,44 @@ "alignLevel": null } }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 46 + }, + "id": 
29, + "panels": [], + "repeat": "request_protocol", + "title": "Requests (${request_protocol})", + "type": "row" + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 4, "w": 12, "x": 0, - "y": 19 + "y": 47 }, "hiddenSeries": false, - "id": 287, + "id": 148, "legend": { "avg": false, "current": false, @@ -753,24 +947,25 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_sub_libp2p_requests_out_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (reason)", - "instant": false, + "expr": "irate(${metric_namespace}_sub_libp2p_requests_out_started_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])", "interval": "", - "legendFormat": "{{reason}}", + "legendFormat": "{{instance}}", "refId": "A" } ], @@ -778,7 +973,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Outgoing request failures per second", + "title": "Requests emitted per second", "tooltip": { "shared": true, "sort": 2, @@ -794,7 +989,7 @@ }, "yaxes": [ { - "format": "short", + "format": "reqps", "label": null, "logBase": 1, "max": null, @@ -821,16 +1016,23 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 4, "w": 12, - "x": 12, - "y": 19 + "x": 0, + "y": 51 }, "hiddenSeries": false, - "id": 286, + "id": 151, "legend": { "avg": false, "current": false, @@ -842,24 +1044,25 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_sub_libp2p_requests_in_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (reason)", - "instant": false, + "expr": "irate(${metric_namespace}_sub_libp2p_requests_in_success_total_count{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])", "interval": "", - "legendFormat": "{{reason}}", + "legendFormat": "{{instance}}", "refId": "A" } ], @@ -867,7 +1070,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Ingoing request failures per second", + "title": "Requests served per second", "tooltip": { "shared": true, "sort": 2, @@ -883,7 +1086,7 @@ }, "yaxes": [ { - "format": "short", + "format": "reqps", "label": null, "logBase": 1, "max": null, @@ -904,79 +1107,60 @@ "alignLevel": null } }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 40 - }, - "id": 23, - "panels": [], - "repeat": "notif_protocol", - "title": "Notifications (${notif_protocol})", - "type": "row" - }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": 
"$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 7, + "h": 4, "w": 12, "x": 0, - "y": 41 + "y": 55 }, "hiddenSeries": false, - "id": 31, - "interval": "1m", + "id": 256, "legend": { "avg": false, "current": false, "max": false, "min": false, - "rightSide": true, - "show": true, + "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, - "maxPerRow": 2, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [ - { - "alias": "/(in)/", - "color": "#73BF69" - }, - { - "alias": "/(out)/", - "color": "#F2495C" - } - ], + "repeat": "nodename", + "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg by (direction) (irate(${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__interval]))", + "expr": "histogram_quantile(0.5, sum(rate(${metric_namespace}_sub_libp2p_requests_out_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le)) > 0", + "instant": false, "interval": "", - "legendFormat": "{{direction}}", + "legendFormat": "{{instance}}", "refId": "A" } ], @@ -984,7 +1168,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Average network notifications per second", + "title": "Median request answer time", "tooltip": { "shared": true, "sort": 2, @@ -1000,8 +1184,8 @@ }, "yaxes": [ { - "format": "cps", - "label": "Notifs/sec", + "format": "s", + "label": null, "logBase": 1, "max": null, "min": null, @@ -1013,7 +1197,7 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -1027,62 +1211,53 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 7, + "h": 4, "w": 12, - "x": 12, - "y": 41 + "x": 0, + "y": 59 }, "hiddenSeries": false, - "id": 37, - "interval": "1m", + "id": 258, "legend": { - "alignAsTable": false, "avg": false, "current": false, - "hideEmpty": false, - "hideZero": false, "max": false, "min": false, - "rightSide": true, - "show": true, + "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, - "maxPerRow": 2, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [ - { - "alias": "/(in)/", - "color": "#73BF69" - }, - { - "alias": "/(out)/", - "color": "#F2495C" - } - ], + "repeat": "nodename", + "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_sub_libp2p_notifications_sizes_sum{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__interval])) by (direction)", - "instant": false, + "expr": "histogram_quantile(0.5, sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le))", "interval": "", - "legendFormat": "{{direction}}", + 
"legendFormat": "{{instance}}", "refId": "A" } ], @@ -1090,7 +1265,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Average bandwidth used by notifications", + "title": "Median request serving time", "tooltip": { "shared": true, "sort": 2, @@ -1106,8 +1281,8 @@ }, "yaxes": [ { - "format": "Bps", - "label": "Bandwidth", + "format": "s", + "label": null, "logBase": 1, "max": null, "min": null, @@ -1119,7 +1294,7 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -1133,16 +1308,23 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 1, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, "fillGradient": 0, "gridPos": { - "h": 6, + "h": 4, "w": 12, "x": 0, - "y": 48 + "y": 63 }, "hiddenSeries": false, - "id": 16, + "id": 257, "legend": { "avg": false, "current": false, @@ -1156,19 +1338,22 @@ "linewidth": 1, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, - "steppedLine": true, + "steppedLine": false, "targets": [ { - "expr": "max(${metric_namespace}_sub_libp2p_out_events_notifications_sizes{instance=~\"${nodename}\", protocol=\"${notif_protocol}\", action=\"sent\"} - ignoring(action) ${metric_namespace}_sub_libp2p_out_events_notifications_sizes{instance=~\"${nodename}\", protocol=\"${notif_protocol}\", action=\"received\"}) by (instance) > 0", + "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_out_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le)) > 0", + "instant": false, "interval": "", "legendFormat": "{{instance}}", "refId": "A" @@ -1178,10 +1363,10 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Total sizes of notifications waiting to be delivered to the rest of Substrate", + "title": "99th percentile request answer time", "tooltip": { - "shared": false, - "sort": 1, + "shared": true, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -1194,7 +1379,7 @@ }, "yaxes": [ { - "format": "bytes", + "format": "s", "label": null, "logBase": 1, "max": null, @@ -1207,7 +1392,7 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -1221,16 +1406,23 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 1, - "fillGradient": 1, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, "gridPos": { - "h": 6, + "h": 4, "w": 12, - "x": 12, - "y": 48 + "x": 0, + "y": 67 }, "hiddenSeries": false, - "id": 21, + "id": 259, "legend": { "avg": false, "current": false, @@ -1244,23 +1436,23 @@ "linewidth": 1, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, - "pluginVersion": "6.4.5", + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "sum(rate(${metric_namespace}_sub_libp2p_notifications_sizes_sum{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[5m])) by (direction, protocol) / 
sum(rate(${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[5m])) by (direction, protocol)", - "format": "time_series", + "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le))", "interval": "", - "legendFormat": "{{direction}}", + "legendFormat": "{{instance}}", "refId": "A" } ], @@ -1268,9 +1460,9 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Average size of sent and received notifications in the past 5 minutes", + "title": "99th percentile request serving time", "tooltip": { - "shared": true, + "shared": false, "sort": 2, "value_type": "individual" }, @@ -1284,9 +1476,9 @@ }, "yaxes": [ { - "format": "bytes", - "label": "Max. notification size", - "logBase": 10, + "format": "s", + "label": null, + "logBase": 1, "max": null, "min": null, "show": true @@ -1297,7 +1489,7 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -1311,73 +1503,65 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "description": "99.9% of the time, the output queue size for this protocol is below the given value", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 6, + "h": 4, "w": 12, "x": 0, - "y": 54 + "y": 71 }, "hiddenSeries": false, - "id": 14, + "id": 287, "legend": { - "alignAsTable": false, "avg": false, - "current": true, - "hideEmpty": false, - "hideZero": true, - "max": true, + "current": false, + "max": false, "min": false, - "rightSide": true, - "show": true, + "show": false, "total": false, - "values": true + "values": false }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [ - { - "alias": "max", - "fill": 1, - "linewidth": 0 - } - ], + "repeat": "nodename", + "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_notifications_queues_size_bucket{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[2m])) by (le, instance))", - "hide": false, + "expr": "avg(irate(${metric_namespace}_sub_libp2p_requests_out_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (reason)", + "instant": false, "interval": "", - "legendFormat": "{{protocol}}", + "legendFormat": "{{reason}}", "refId": "A" - }, - { - "expr": "max(histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_notifications_queues_size_bucket{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[2m])) by (le, instance)))", - "interval": "", - "legendFormat": "max", - "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "99th percentile of queues sizes", + "title": "Outgoing request failures per second", "tooltip": { "shared": true, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -1393,8 +1577,8 @@ "format": "short", "label": null, "logBase": 1, - "max": "300", - "min": "0", + "max": null, + "min": null, "show": true }, { @@ -1403,7 +1587,7 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": 
true } ], "yaxis": { @@ -1417,16 +1601,23 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 1, - "fillGradient": 1, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, "gridPos": { - "h": 6, + "h": 4, "w": 12, - "x": 12, - "y": 54 + "x": 0, + "y": 75 }, "hiddenSeries": false, - "id": 134, + "id": 286, "legend": { "avg": false, "current": false, @@ -1440,23 +1631,24 @@ "linewidth": 1, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, - "pluginVersion": "6.4.5", + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(1.0, sum(rate(${metric_namespace}_sub_libp2p_notifications_sizes_bucket{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[5m])) by (direction, le))", - "format": "time_series", + "expr": "avg(irate(${metric_namespace}_sub_libp2p_requests_in_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (reason)", + "instant": false, "interval": "", - "legendFormat": "{{direction}}", + "legendFormat": "{{reason}}", "refId": "A" } ], @@ -1464,7 +1656,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Maximum size of sent and received notifications in the past 5 minutes", + "title": "Ingoing request failures per second", "tooltip": { "shared": true, "sort": 2, @@ -1480,9 +1672,9 @@ }, "yaxes": [ { - "format": "bytes", - "label": "Max. notification size", - "logBase": 10, + "format": "short", + "label": null, + "logBase": 1, "max": null, "min": null, "show": true @@ -1493,7 +1685,7 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -1508,117 +1700,81 @@ "h": 1, "w": 24, "x": 0, - "y": 60 + "y": 79 }, - "id": 27, + "id": 23, "panels": [], - "title": "Transport", + "repeat": "notif_protocol", + "title": "Notifications (${notif_protocol})", "type": "row" }, { "aliasColors": {}, - "bars": true, + "bars": false, "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 6, - "w": 24, + "w": 12, "x": 0, - "y": 61 + "y": 80 }, "hiddenSeries": false, - "id": 19, - "interval": "1m", + "id": 447, "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "max": false, - "min": false, + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, "rightSide": false, "show": true, "total": false, - "values": false + "values": true }, - "lines": false, + "lines": true, "linewidth": 1, - "maxPerRow": 2, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [ - { - "alias": "established (in)", - "color": "#37872D" - }, - { - "alias": "established (out)", - "color": "#C4162A" - }, - { - "alias": "pending (out)", - "color": "#FF7383" - }, - { - "alias": "closed-recently", - "color": "#FADE2A", - "steppedLine": true - } - ], + "repeat": "nodename", + "repeatDirection": "h", + "seriesOverrides": [], "spaceLength": 10, - "stack": true, + "stack": false, 
"steppedLine": false, "targets": [ { - "expr": "avg(sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"in\", instance=~\"${nodename}\"}) by (instance) - sum(${metric_namespace}_sub_libp2p_connections_closed_total{direction=\"in\", instance=~\"${nodename}\"}) by (instance))", - "format": "time_series", - "hide": false, - "interval": "", - "legendFormat": "established (in)", - "refId": "A" - }, - { - "expr": "avg(sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"out\", instance=~\"${nodename}\"}) by (instance) - sum(${metric_namespace}_sub_libp2p_connections_closed_total{direction=\"out\", instance=~\"${nodename}\"}) by (instance))", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "established (out)", - "refId": "C" - }, - { - "expr": "avg(sum by (instance) (${metric_namespace}_sub_libp2p_pending_connections{instance=~\"${nodename}\"}))", - "hide": false, + "expr": "${metric_namespace}_sub_libp2p_notifications_streams_opened_total{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"} - ${metric_namespace}_sub_libp2p_notifications_streams_closed_total{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}", "interval": "", - "legendFormat": "pending (out)", + "legendFormat": "{{instance}}", "refId": "B" - }, - { - "expr": "avg(sum by(instance) (increase(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}[$__interval])))", - "hide": false, - "interval": "", - "legendFormat": "closed-recently", - "refId": "D" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Average transport-level (TCP, QUIC, ...) connections per node", + "title": "Number of open substreams", "tooltip": { - "shared": true, - "sort": 0, + "shared": false, + "sort": 1, "value_type": "individual" }, "type": "graph", @@ -1632,7 +1788,7 @@ "yaxes": [ { "format": "short", - "label": "Connections", + "label": null, "logBase": 1, "max": null, "min": null, @@ -1658,52 +1814,66 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 6, - "w": 24, + "w": 12, "x": 0, - "y": 67 + "y": 86 }, "hiddenSeries": false, - "id": 189, + "id": 31, "interval": "1m", "legend": { - "alignAsTable": false, "avg": false, "current": false, - "hideEmpty": false, "max": false, "min": false, - "rightSide": false, + "rightSide": true, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "maxPerRow": 2, + "maxPerRow": 12, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeatDirection": "v", - "seriesOverrides": [], + "repeat": "nodename", + "repeatDirection": "h", + "seriesOverrides": [ + { + "alias": "/(in)/", + "color": "#73BF69" + }, + { + "alias": "/(out)/", + "color": "#F2495C" + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "1 - \n\navg(\n ${metric_namespace}_sub_libp2p_distinct_peers_connections_opened_total{instance=~\"${nodename}\"} - ${metric_namespace}_sub_libp2p_distinct_peers_connections_closed_total{instance=~\"${nodename}\"}\n) by (instance)\n\n/\n\navg(\r\n sum(${metric_namespace}_sub_libp2p_connections_opened_total{instance=~\"${nodename}\"}) by (instance) - 
sum(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}) by (instance)\r\n) by (instance)", - "format": "time_series", - "hide": false, + "expr": "avg by (direction) (irate(${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__interval]))", "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{direction}}", "refId": "A" } ], @@ -1711,7 +1881,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Percentage of peers for which we have more than one connection open", + "title": "Average network notifications per second", "tooltip": { "shared": true, "sort": 2, @@ -1725,686 +1895,10 @@ "show": true, "values": [] }, - "yaxes": [ - { - "format": "percentunit", - "label": "", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 73 - }, - "hiddenSeries": false, - "id": 39, - "interval": "1m", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*/", - "color": "#FF780A" - } - ], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "avg(increase(${metric_namespace}_sub_libp2p_incoming_connections_handshake_errors_total{instance=~\"${nodename}\"}[$__interval])) by (reason)", - "interval": "", - "legendFormat": "{{reason}}", - "refId": "A" - }, - { - "expr": "avg(increase(${metric_namespace}_sub_libp2p_listeners_errors_total{instance=~\"${nodename}\"}[$__interval]))", - "interval": "", - "legendFormat": "pre-handshake", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Number of incoming connection errors, averaged by node", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "Errors", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "cards": { - "cardPadding": null, - "cardRound": null - }, - "color": { - "cardColor": "#b4ff00", - "colorScale": "sqrt", - "colorScheme": "interpolateOranges", - "exponent": 0.5, - "max": 100, - "min": 0, - "mode": "spectrum" - }, - "dataFormat": "timeseries", - "datasource": "$data_source", - "description": "Each bucket represent a certain number of nodes using a certain bandwidth range.", - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 73 - }, - "heatmap": {}, - "hideZeroBuckets": false, - "highlightCards": true, - "id": 4, - "legend": { - "show": false - }, - "reverseYBuckets": false, - "targets": [ - { - "expr": 
"${metric_namespace}_network_per_sec_bytes{instance=~\"${nodename}\"}", - "format": "time_series", - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Heatmap of network bandwidth", - "tooltip": { - "show": true, - "showHistogram": false - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "xBucketNumber": null, - "xBucketSize": "2.5m", - "yAxis": { - "decimals": null, - "format": "Bps", - "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null - }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 79 - }, - "hiddenSeries": false, - "id": 81, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "avg(increase(${metric_namespace}_sub_libp2p_pending_connections_errors_total{instance=~\"${nodename}\"}[$__interval])) by (reason)", - "interval": "", - "legendFormat": "{{reason}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Dialing attempt errors, averaged per node", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 79 - }, - "hiddenSeries": false, - "id": 46, - "interval": "1m", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "maxPerRow": 2, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "avg(increase(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}[$__interval])) by (reason)", - "interval": "", - "legendFormat": "{{reason}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Disconnects, averaged per node", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - 
"mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "short", - "label": "Disconnects", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 86 - }, - "id": 52, - "panels": [], - "title": "GrandPa", - "type": "row" - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 87 - }, - "hiddenSeries": false, - "id": 54, - "interval": "1m", - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": false, - "linewidth": 1, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [ - { - "alias": "/discard/", - "color": "#FA6400", - "zindex": -2 - }, - { - "alias": "/keep/", - "color": "#73BF69", - "zindex": 2 - }, - { - "alias": "/process_and_discard/", - "color": "#5794F2" - } - ], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "avg(increase(${metric_namespace}_finality_grandpa_communication_gossip_validator_messages{instance=~\"${nodename}\"}[$__interval])) by (action, message)", - "interval": "", - "legendFormat": "{{message}} => {{action}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "GrandPa messages received from the network, and action", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 93 - }, - "id": 25, - "panels": [], - "repeat": null, - "title": "Kademlia & authority-discovery", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "description": "", - "fill": 0, - "fillGradient": 0, - "gridPos": { - "h": 5, - "w": 12, - "x": 0, - "y": 94 - }, - "hiddenSeries": false, - "id": 33, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": true, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(${metric_namespace}_sub_libp2p_kbuckets_num_nodes{instance=~\"${nodename}\"}) by 
(instance)", - "format": "time_series", - "instant": false, - "interval": "", - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Number of entries in Kademlia k-buckets", - "tooltip": { - "shared": true, - "sort": 1, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "max": 0, - "min": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 0, - "fillGradient": 7, - "gridPos": { - "h": 5, - "w": 12, - "x": 12, - "y": 94 - }, - "hiddenSeries": false, - "id": 35, - "interval": "", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(${metric_namespace}_sub_libp2p_kademlia_random_queries_total{instance=~\"${nodename}\"}[5m])", - "interval": "", - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Kademlia random discovery queries started per second", - "tooltip": { - "shared": true, - "sort": 1, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, "yaxes": [ { "format": "cps", - "label": "Queries per second", + "label": "Notifs/sec", "logBase": 1, "max": null, "min": null, @@ -2430,44 +1924,70 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 4, + "h": 6, "w": 12, "x": 0, - "y": 99 + "y": 92 }, "hiddenSeries": false, - "id": 111, + "id": 37, + "interval": "1m", "legend": { + "alignAsTable": false, "avg": false, "current": false, + "hideEmpty": false, + "hideZero": false, "max": false, "min": false, - "show": false, + "rightSide": true, + "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "maxPerRow": 12, + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [], + "repeat": "nodename", + "repeatDirection": "h", + "seriesOverrides": [ + { + "alias": "/(in)/", + "color": "#73BF69" + }, + { + "alias": "/(out)/", + "color": "#F2495C" + } + ], "spaceLength": 10, "stack": false, - "steppedLine": true, + "steppedLine": false, "targets": [ { - "expr": "${metric_namespace}_sub_libp2p_kademlia_records_count{instance=~\"${nodename}\"}", + "expr": "avg(irate(${metric_namespace}_sub_libp2p_notifications_sizes_sum{instance=~\"${nodename}\", 
protocol=\"${notif_protocol}\"}[$__interval])) by (direction)", + "instant": false, "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{direction}}", "refId": "A" } ], @@ -2475,10 +1995,10 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of Kademlia records", + "title": "Average bandwidth used by notifications", "tooltip": { "shared": true, - "sort": 1, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -2491,8 +2011,8 @@ }, "yaxes": [ { - "format": "short", - "label": null, + "format": "Bps", + "label": "Bandwidth", "logBase": 1, "max": null, "min": null, @@ -2518,16 +2038,23 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 4, + "h": 6, "w": 12, - "x": 12, - "y": 99 + "x": 0, + "y": 98 }, "hiddenSeries": false, - "id": 112, + "id": 16, "legend": { "avg": false, "current": false, @@ -2539,21 +2066,23 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, - "steppedLine": false, + "steppedLine": true, "targets": [ { - "expr": "${metric_namespace}_sub_libp2p_kademlia_records_sizes_total{instance=~\"${nodename}\"}", + "expr": "max(${metric_namespace}_sub_libp2p_out_events_notifications_sizes{instance=~\"${nodename}\", protocol=\"${notif_protocol}\", action=\"sent\"} - ignoring(action) ${metric_namespace}_sub_libp2p_out_events_notifications_sizes{instance=~\"${nodename}\", protocol=\"${notif_protocol}\", action=\"received\"}) by (instance) > 0", "interval": "", "legendFormat": "{{instance}}", "refId": "A" @@ -2563,10 +2092,10 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Total size of Kademlia records", + "title": "Total sizes of notifications waiting to be delivered to the rest of Substrate", "tooltip": { - "shared": true, - "sort": 2, + "shared": false, + "sort": 1, "value_type": "individual" }, "type": "graph", @@ -2606,19 +2135,24 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "description": "", - "fill": 0, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 1, "gridPos": { - "h": 5, + "h": 6, "w": 12, "x": 0, - "y": 103 + "y": 104 }, "hiddenSeries": false, - "id": 211, + "id": 21, "legend": { - "alignAsTable": false, "avg": false, "current": false, "max": false, @@ -2629,25 +2163,26 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, - "percentage": true, + "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "${metric_namespace}_authority_discovery_known_authorities_count{instance=~\"${nodename}\"}", + "expr": "sum(rate(${metric_namespace}_sub_libp2p_notifications_sizes_sum{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[5m])) by (direction, protocol) / 
sum(rate(${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[5m])) by (direction, protocol)", "format": "time_series", - "instant": false, "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{direction}}", "refId": "A" } ], @@ -2655,17 +2190,15 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of authorities discovered by authority-discovery", + "title": "Average size of sent and received notifications in the past 5 minutes", "tooltip": { "shared": true, - "sort": 1, + "sort": 2, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, - "max": 0, - "min": null, "mode": "time", "name": null, "show": true, @@ -2673,9 +2206,9 @@ }, "yaxes": [ { - "format": "short", - "label": null, - "logBase": 1, + "format": "bytes", + "label": "Max. notification size", + "logBase": 10, "max": null, "min": null, "show": true @@ -2700,19 +2233,24 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "description": "", - "fill": 0, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 1, "gridPos": { - "h": 5, + "h": 6, "w": 12, - "x": 12, - "y": 103 + "x": 0, + "y": 110 }, "hiddenSeries": false, - "id": 233, + "id": 134, "legend": { - "alignAsTable": false, "avg": false, "current": false, "max": false, @@ -2723,25 +2261,26 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, - "percentage": true, + "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "${metric_namespace}_authority_discovery_amount_external_addresses_last_published{instance=~\"${nodename}\"}", + "expr": "histogram_quantile(1.0, sum(rate(${metric_namespace}_sub_libp2p_notifications_sizes_bucket{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[5m])) by (direction, le))", "format": "time_series", - "instant": false, "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{direction}}", "refId": "A" } ], @@ -2749,17 +2288,15 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of addresses published by authority-discovery", + "title": "Maximum size of sent and received notifications in the past 5 minutes", "tooltip": { "shared": true, - "sort": 1, + "sort": 2, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, - "max": 0, - "min": null, "mode": "time", "name": null, "show": true, @@ -2767,9 +2304,9 @@ }, "yaxes": [ { - "format": "short", - "label": null, - "logBase": 1, + "format": "bytes", + "label": "Max. 
notification size", + "logBase": 10, "max": null, "min": null, "show": true @@ -2794,47 +2331,71 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "description": "99.9% of the time, the output queue size for this protocol is below the given value", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 5, + "h": 6, "w": 12, "x": 0, - "y": 108 + "y": 116 }, "hiddenSeries": false, - "id": 68, - "interval": "1m", + "id": 14, "legend": { + "alignAsTable": false, "avg": false, - "current": false, - "max": false, + "current": true, + "hideEmpty": false, + "hideZero": true, + "max": true, "min": false, - "show": false, + "rightSide": true, + "show": true, "total": false, - "values": false + "values": true }, "lines": true, "linewidth": 1, - "nullPointMode": "connected", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [], + "repeat": "nodename", + "seriesOverrides": [ + { + "alias": "max", + "fill": 1, + "linewidth": 0 + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_found\", instance=~\"${nodename}\"}[2h]) / ignoring(name) (\n rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_found\", instance=~\"${nodename}\"}[2h]) +\n ignoring(name) rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_not_found\", instance=~\"${nodename}\"}[2h])\n)", + "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_notifications_queues_size_bucket{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[2m])) by (le, instance))", + "hide": false, "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{protocol}}", + "refId": "A" + }, + { + "expr": "max(histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_notifications_queues_size_bucket{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[2m])) by (le, instance)))", + "interval": "", + "legendFormat": "max", "refId": "B" } ], @@ -2842,10 +2403,10 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Authority discovery get_value success rate in past two hours", + "title": "99th percentile of queues sizes", "tooltip": { "shared": true, - "sort": 1, + "sort": 0, "value_type": "individual" }, "type": "graph", @@ -2858,11 +2419,11 @@ }, "yaxes": [ { - "format": "percentunit", + "format": "short", "label": null, "logBase": 1, - "max": "1.0", - "min": null, + "max": "300", + "min": "0", "show": true }, { @@ -2879,63 +2440,105 @@ "alignLevel": null } }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 122 + }, + "id": 52, + "panels": [], + "title": "GrandPa", + "type": "row" + }, { "aliasColors": {}, - "bars": false, + "bars": true, "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 5, + "h": 6, "w": 12, - "x": 12, - "y": 108 + "x": 0, + "y": 123 }, "hiddenSeries": false, - "id": 234, + "id": 54, "interval": "1m", "legend": { + "alignAsTable": true, "avg": false, "current": false, + "hideEmpty": true, + 
"hideZero": true, "max": false, "min": false, - "show": false, - "total": false, - "values": false + "rightSide": true, + "show": true, + "total": true, + "values": true }, - "lines": true, + "lines": false, "linewidth": 1, - "nullPointMode": "connected", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeatDirection": "v", - "seriesOverrides": [], + "repeat": "nodename", + "repeatDirection": "h", + "seriesOverrides": [ + { + "alias": "/discard/", + "color": "#FA6400", + "zindex": -2 + }, + { + "alias": "/keep/", + "color": "#73BF69", + "zindex": 2 + }, + { + "alias": "/process_and_discard/", + "color": "#5794F2" + } + ], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": false, "targets": [ { - "expr": "rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_put\", instance=~\"${nodename}\"}[2h]) / ignoring(name) (\n rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_put\", instance=~\"${nodename}\"}[2h]) +\n ignoring(name) rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_put_failed\", instance=~\"${nodename}\"}[2h])\n)", + "expr": "rate(${metric_namespace}_finality_grandpa_communication_gossip_validator_messages{instance=~\"${nodename}\"}[$__interval])", "interval": "", - "legendFormat": "{{instance}}", - "refId": "B" + "legendFormat": "{{message}} => {{action}}", + "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Authority discovery put_value success rate in past two hours", + "title": "GrandPa messages received from the network, and action", "tooltip": { "shared": true, - "sort": 1, + "sort": 0, "value_type": "individual" }, "type": "graph", @@ -2948,10 +2551,10 @@ }, "yaxes": [ { - "format": "percentunit", + "format": "short", "label": null, "logBase": 1, - "max": "1.0", + "max": null, "min": null, "show": true }, @@ -2961,17 +2564,62 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { "align": false, "alignLevel": null } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 129 + }, + "id": 25, + "panels": [], + "repeat": null, + "title": "Kademlia & authority-discovery", + "type": "row" + }, + { + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "folderId": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 130 + }, + "headings": true, + "id": 423, + "limit": 10, + "pluginVersion": "7.2.1", + "query": "kademlia", + "recent": false, + "search": false, + "starred": false, + "tags": [], + "timeFrom": null, + "timeShift": null, + "title": "Kademlia and Authority Discovery metrics moved to \"kademlia-and-authority-discovery\" dashboard.", + "type": "dashlist" } ], "refresh": "1m", - "schemaVersion": 22, + "schemaVersion": 26, "style": "dark", "tags": [], "templating": { @@ -2981,9 +2629,9 @@ "current": {}, "datasource": "$data_source", "definition": "${metric_namespace}_process_start_time_seconds", + "error": null, "hide": 0, - "includeAll": true, - "index": -1, + "includeAll": false, "label": "Instance name filter", "multi": true, "name": "nodename", @@ -3003,15 +2651,15 @@ "allValue": null, "current": {}, "datasource": "$data_source", - "definition": "${metric_namespace}_sub_libp2p_notifications_sizes_count", + 
"definition": "${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\"}", + "error": null, "hide": 2, "includeAll": true, - "index": -1, "label": null, "multi": false, "name": "notif_protocol", "options": [], - "query": "${metric_namespace}_sub_libp2p_notifications_sizes_count", + "query": "${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\"}", "refresh": 1, "regex": "/protocol=\"(.*?)\"/", "skipUrlSync": false, @@ -3026,15 +2674,15 @@ "allValue": null, "current": {}, "datasource": "$data_source", - "definition": "${metric_namespace}_sub_libp2p_requests_out_started_total", + "definition": "${metric_namespace}_sub_libp2p_requests_out_started_total{instance=~\"${nodename}\"}", + "error": null, "hide": 2, "includeAll": true, - "index": -1, "label": null, "multi": false, "name": "request_protocol", "options": [], - "query": "${metric_namespace}_sub_libp2p_requests_out_started_total", + "query": "${metric_namespace}_sub_libp2p_requests_out_started_total{instance=~\"${nodename}\"}", "refresh": 1, "regex": "/protocol=\"(.*?)\"/", "skipUrlSync": false, @@ -3048,9 +2696,10 @@ { "current": { "selected": false, - "text": "Prometheus", - "value": "Prometheus" + "text": "prometheus.parity-mgmt", + "value": "prometheus.parity-mgmt" }, + "error": null, "hide": 0, "includeAll": false, "label": "Source of data", @@ -3066,15 +2715,18 @@ { "current": { "value": "${VAR_METRIC_NAMESPACE}", - "text": "${VAR_METRIC_NAMESPACE}" + "text": "${VAR_METRIC_NAMESPACE}", + "selected": false }, + "error": null, "hide": 2, "label": "Prefix of the metrics", "name": "metric_namespace", "options": [ { "value": "${VAR_METRIC_NAMESPACE}", - "text": "${VAR_METRIC_NAMESPACE}" + "text": "${VAR_METRIC_NAMESPACE}", + "selected": false } ], "query": "${VAR_METRIC_NAMESPACE}", @@ -3101,11 +2753,8 @@ "1d" ] }, - "timezone": "", + "timezone": "utc", "title": "Substrate Networking", "uid": "vKVuiD9Zk", - "variables": { - "list": [] - }, - "version": 121 + "version": 147 } diff --git a/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json b/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json index 539fdec086a37..a3db46ec6d2a9 100644 --- a/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json +++ b/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json @@ -13,7 +13,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "6.7.3" + "version": "7.3.6" }, { "type": "panel", @@ -26,6 +26,12 @@ "id": "prometheus", "name": "Prometheus", "version": "1.0.0" + }, + { + "type": "panel", + "id": "text", + "name": "Text", + "version": "" } ], "annotations": { @@ -75,18 +81,45 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1599471940817, + "iteration": 1610462629581, "links": [], "panels": [ { - "collapsed": false, "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, + "id": 42, + "options": { + "content": "", + "mode": "markdown" + }, + "pluginVersion": "7.3.6", + "repeat": "nodename", + "timeFrom": null, + "timeShift": null, + "title": "$nodename", + "type": "text" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, "id": 29, "panels": [], "title": "Tasks", @@ -94,17 +127,24 @@ }, { "aliasColors": {}, - "bars": false, + "bars": true, "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": 
{ + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 1 + "y": 2 }, "hiddenSeries": false, "id": 11, @@ -122,23 +162,25 @@ "total": false, "values": true }, - "lines": true, - "linewidth": 2, - "nullPointMode": "null", + "lines": false, + "linewidth": 1, + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, - "stack": false, - "steppedLine": true, + "stack": true, + "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_tasks_polling_duration_sum{instance=~\"${nodename}\"}[10m])) by (task_name)", + "expr": "irate(${metric_namespace}_tasks_polling_duration_sum{instance=~\"${nodename}\"}[10m])", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -148,7 +190,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "CPU time spent on each task (average per node)", + "title": "CPU time spent on each task", "tooltip": { "shared": true, "sort": 2, @@ -191,13 +233,20 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 7 + "y": 8 }, "hiddenSeries": false, "id": 30, @@ -219,19 +268,21 @@ "linewidth": 2, "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": true, "targets": [ { - "expr": "avg(irate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[10m])) by (task_name)", + "expr": "irate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[10m])", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -241,7 +292,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Task polling rate per second (average per node)", + "title": "Task polling rate per second", "tooltip": { "shared": true, "sort": 2, @@ -284,104 +335,20 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 13 - }, - "hiddenSeries": false, - "id": 31, - "interval": "", - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "max(irate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[10m])) by (task_name)", - "interval": "", - "legendFormat": "{{task_name}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Task polling rate per second (maximum per node)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - 
"mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "cps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 19 + "y": 14 }, "hiddenSeries": false, "id": 15, @@ -401,19 +368,21 @@ "linewidth": 1, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": true, "targets": [ { - "expr": "avg by(task_name) (irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[10m]))", + "expr": "irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[10m])", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -423,7 +392,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of tasks started per second (average per node)", + "title": "Number of tasks started per second", "tooltip": { "shared": true, "sort": 2, @@ -466,104 +435,20 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 25 - }, - "hiddenSeries": false, - "id": 16, - "interval": "", - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "max by(task_name) (irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[10m]))", - "interval": "", - "legendFormat": "{{task_name}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Number of tasks started per second (maximum over all nodes)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 10, - "max": null, - "min": "0", - "show": true + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 31 + "y": 20 }, "hiddenSeries": false, "id": 2, @@ -583,110 +468,21 @@ "linewidth": 1, "nullPointMode": "null as zero", "options": { - "dataLinks": [] - }, - "percentage": false, - 
"pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "avg by(task_name) (${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason))", - "interval": "", - "legendFormat": "{{task_name}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Number of tasks running (average per node)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 10, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 0, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 37 - }, - "hiddenSeries": false, - "id": 3, - "interval": "", - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": true, "targets": [ { - "expr": "max by(task_name) (${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason))", + "expr": "${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason)\n\n# Fallback if tasks_ended_total is null for that task\nor on(instance, task_name) ${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -696,7 +492,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of tasks running (maximum over all nodes)", + "title": "Number of tasks running", "tooltip": { "shared": true, "sort": 2, @@ -740,13 +536,20 @@ "dashes": false, "datasource": "$data_source", "decimals": null, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 43 + "y": 26 }, "hiddenSeries": false, "id": 7, @@ -768,19 +571,21 @@ "linewidth": 1, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": true, "targets": [ { - "expr": "avg(\n irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"+Inf\"}[10m])\n - ignoring(le)\n irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"1.024\"}[10m])\n) by 
(task_name) > 0", + "expr": "irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"+Inf\"}[10m])\n - ignoring(le)\n irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"1.024\"}[10m]) > 0", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -790,7 +595,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Calls to `Future::poll` that took more than one second (average per node)", + "title": "Number of calls to `Future::poll` that took more than one second", "tooltip": { "shared": true, "sort": 2, @@ -835,11 +640,11 @@ "h": 1, "w": 24, "x": 0, - "y": 49 + "y": 32 }, "id": 27, "panels": [], - "title": "Misc", + "title": "Unbounded Channels", "type": "row" }, { @@ -848,13 +653,20 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 7, "w": 24, "x": 0, - "y": 50 + "y": 33 }, "hiddenSeries": false, "id": 32, @@ -873,19 +685,21 @@ "linewidth": 1, "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg(${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"} - ignoring(action) ${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"received\"}) by (entity)", + "expr": "(\n ${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"} - ignoring(action) ${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"received\"}\n)\n\n# Fallback if the `received` is null\nor on(instance) ${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"}", "interval": "", "legendFormat": "{{entity}}", "refId": "B" @@ -895,7 +709,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Unbounded channels size (average per node)", + "title": "Unbounded channels size", "tooltip": { "shared": true, "sort": 2, @@ -938,13 +752,20 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 7, "w": 24, "x": 0, - "y": 57 + "y": 40 }, "hiddenSeries": false, "id": 33, @@ -963,19 +784,21 @@ "linewidth": 1, "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"}[10m])) by (entity)", + "expr": "irate(${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"}[10m])", "interval": "", "legendFormat": "{{entity}}", "refId": "B" @@ -985,7 +808,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Unbounded channels rate (average per node)", + "title": "Unbounded channels message sending rate (1s)", "tooltip": { "shared": true, "sort": 2, @@ -1024,7 +847,7 @@ } ], "refresh": false, - "schemaVersion": 22, + "schemaVersion": 26, 
"style": "dark", "tags": [], "templating": { @@ -1034,9 +857,9 @@ "current": {}, "datasource": "$data_source", "definition": "${metric_namespace}_process_start_time_seconds", + "error": null, "hide": 0, - "includeAll": true, - "index": -1, + "includeAll": false, "label": "Instance filter", "multi": true, "name": "nodename", @@ -1055,15 +878,18 @@ { "current": { "value": "${VAR_METRIC_NAMESPACE}", - "text": "${VAR_METRIC_NAMESPACE}" + "text": "${VAR_METRIC_NAMESPACE}", + "selected": false }, + "error": null, "hide": 2, "label": "Prefix of the metrics", "name": "metric_namespace", "options": [ { "value": "${VAR_METRIC_NAMESPACE}", - "text": "${VAR_METRIC_NAMESPACE}" + "text": "${VAR_METRIC_NAMESPACE}", + "selected": false } ], "query": "${VAR_METRIC_NAMESPACE}", @@ -1076,6 +902,7 @@ "text": "prometheus.parity-mgmt", "value": "prometheus.parity-mgmt" }, + "error": null, "hide": 0, "includeAll": false, "label": "Source of all the data", @@ -1108,11 +935,8 @@ "1d" ] }, - "timezone": "", + "timezone": "utc", "title": "Substrate Service Tasks", "uid": "3LA6XNqZz", - "variables": { - "list": [] - }, - "version": 52 + "version": 59 } diff --git a/.maintain/update-copyright.sh b/.maintain/update-copyright.sh index d48fc3cc979d6..d67cab7c1e152 100755 --- a/.maintain/update-copyright.sh +++ b/.maintain/update-copyright.sh @@ -1,15 +1,14 @@ #!/usr/bin/env bash -SINGLE_DATES=$(grep -lr "// Copyright [0-9]* Parity Technologies (UK) Ltd.") -RANGE_DATES=$(grep -lr "// Copyright [0-9]*-[0-9]* Parity Technologies (UK) Ltd.") +SINGLE_DATES=$(grep -lr "// Copyright (C) [0-9]* Parity Technologies (UK) Ltd.") YEAR=$(date +%Y) for file in $SINGLE_DATES; do - FILE_YEAR=$(cat $file | sed -n "s|// Copyright \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\) Parity Technologies (UK) Ltd.|\1|p") + FILE_YEAR=$(cat $file | sed -n "s|// Copyright (C) \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\) Parity Technologies (UK) Ltd.|\1|p") if [ $YEAR -ne $FILE_YEAR ]; then - sed -i -e "s|// Copyright \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\) Parity Technologies (UK) Ltd.|// Copyright \1-$YEAR Parity Technologies (UK) Ltd.|g" $file + sed -i -e "s|// Copyright (C) \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\) Parity Technologies (UK) Ltd.|// Copyright (C) \1-$YEAR Parity Technologies (UK) Ltd.|g" $file fi done -grep -lr "// Copyright [0-9]*-[0-9]* Parity Technologies (UK) Ltd." | - xargs sed -i -e "s|// Copyright \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\)-[[:digit:]][[:digit:]][[:digit:]][[:digit:]] Parity Technologies (UK) Ltd.|// Copyright \1-$YEAR Parity Technologies (UK) Ltd.|g" +grep -lr "// Copyright (C) [0-9]*-[0-9]* Parity Technologies (UK) Ltd." 
| + xargs sed -i -e "s|// Copyright (C) \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\)-[[:digit:]][[:digit:]][[:digit:]][[:digit:]] Parity Technologies (UK) Ltd.|// Copyright (C) \1-$YEAR Parity Technologies (UK) Ltd.|g" diff --git a/Cargo.lock b/Cargo.lock index 0f3e68a844e30..edbf183e74618 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,11 +12,11 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.13.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ - "gimli 0.22.0", + "gimli", ] [[package]] @@ -31,89 +31,69 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] name = "aes" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7001367fde4c768a19d1029f0a8be5abd9308e1119846d5bd9ad26297b8faf5" +checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6" dependencies = [ "aes-soft", "aesni", - "block-cipher 0.7.1", + "block-cipher", ] [[package]] name = "aes-gcm" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f5007801316299f922a6198d1d09a0bae95786815d066d5880d13f7c45ead1" +checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f" dependencies = [ "aead", "aes", - "block-cipher 0.7.1", + "block-cipher", "ghash", - "subtle 2.2.3", + "subtle 2.4.0", ] [[package]] name = "aes-soft" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4925647ee64e5056cf231608957ce7c81e12d6d6e316b9ce1404778cc1d35fa7" +checksum = "63dd91889c49327ad7ef3b500fd1109dbd3c509a03db0d4a9ce413b79f575cb6" dependencies = [ - "block-cipher 0.7.1", + "block-cipher", "byteorder", - "opaque-debug 0.2.3", + "opaque-debug 0.3.0", ] [[package]] name = "aesni" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050d39b0b7688b3a3254394c3e30a9d66c41dcf9b05b0e2dbdc623f6505d264" -dependencies = [ - "block-cipher 0.7.1", - "opaque-debug 0.2.3", -] - -[[package]] -name = "ahash" -version = "0.2.19" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29661b60bec623f0586702976ff4d0c9942dcb6723161c2df0eea78455cfedfb" +checksum = "0a6fe808308bb07d393e2ea47780043ec47683fcf19cf5efc8ca51c50cc8c68a" dependencies = [ - "const-random", + "block-cipher", + "opaque-debug 0.3.0", ] [[package]] name = "ahash" -version = "0.3.8" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" +checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" [[package]] name = "aho-corasick" -version = "0.7.13" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" dependencies = [ "memchr", ] -[[package]] -name = "alga" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4f823d037a7ec6ea2197046bafd4ae150e6bc36f9ca347404f46a46823fa84f2" -dependencies = [ - "approx", - "num-complex", - "num-traits", -] - [[package]] name = "ansi_term" version = "0.11.0" @@ -134,9 +114,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.31" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bb70cc08ec97ca5450e6eba421deeea5f172c0fc61f78b5357b2a8e8be195f" +checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" [[package]] name = "approx" @@ -149,15 +129,9 @@ dependencies = [ [[package]] name = "arbitrary" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cb544f1057eaaff4b34f8c4dcf56fc3cd04debd291998405d135017a7c3c0f4" - -[[package]] -name = "arc-swap" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" +checksum = "db55d72333851e17d572bec876e390cd3b11eb1ef53ae821dd9f3b653d2b4569" [[package]] name = "arrayref" @@ -176,9 +150,9 @@ dependencies = [ [[package]] name = "arrayvec" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "asn1_der" @@ -201,9 +175,9 @@ dependencies = [ [[package]] name = "assert_cmd" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c88b9ca26f9c16ec830350d309397e74ee9abdfd8eb1f71cb6ecc71a3fc818da" +checksum = "3dc1679af9a1ab4bea16f228b05d18f8363f8327b1fa8db00d2760cfafc6b61e" dependencies = [ "doc-comment", "predicates", @@ -214,9 +188,9 @@ dependencies = [ [[package]] name = "assert_matches" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" +checksum = "695579f0f2520f3774bb40461e5adb066459d4e0af4d59d20175484fb8e9edf1" [[package]] name = "async-channel" @@ -231,53 +205,63 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d373d78ded7d0b3fa8039375718cde0aace493f2e34fb60f51cbf567562ca801" +checksum = "eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146" dependencies = [ "async-task", "concurrent-queue", "fastrand", "futures-lite", - "once_cell 1.4.1", + "once_cell", "vec-arena", ] [[package]] name = "async-global-executor" -version = "1.3.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fefeb39da249f4c33af940b779a56723ce45809ef5c54dad84bb538d4ffb6d9e" +checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" dependencies = [ + "async-channel", "async-executor", "async-io", + "async-mutex", + "blocking", "futures-lite", "num_cpus", - "once_cell 1.4.1", + "once_cell", ] [[package]] name = "async-io" -version = "1.1.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38628c78a34f111c5a6b98fc87dfc056cd1590b61afe748b145be4623c56d194" +checksum = "9315f8f07556761c3e48fec2e6b276004acf426e6dc068b2c2251854d65ee0fd" dependencies = [ - "cfg-if", "concurrent-queue", "fastrand", "futures-lite", "libc", "log", - "once_cell 1.4.1", + "nb-connect", + "once_cell", 
"parking", "polling", - "socket2", "vec-arena", "waker-fn", - "wepoll-sys-stjepang", "winapi 0.3.9", ] +[[package]] +name = "async-lock" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1996609732bde4a9988bc42125f55f2af5f3c36370e27c778d5191a4a1b63bfb" +dependencies = [ + "event-listener", +] + [[package]] name = "async-mutex" version = "1.4.0" @@ -287,17 +271,34 @@ dependencies = [ "event-listener", ] +[[package]] +name = "async-process" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c8cea09c1fb10a317d1b5af8024eeba256d6554763e85ecd90ff8df31c7bbda" +dependencies = [ + "async-io", + "blocking", + "cfg-if 0.1.10", + "event-listener", + "futures-lite", + "once_cell", + "signal-hook", + "winapi 0.3.9", +] + [[package]] name = "async-std" -version = "1.6.5" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9fa76751505e8df1c7a77762f60486f60c71bbd9b8557f4da6ad47d083732ed" +checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341" dependencies = [ + "async-channel", "async-global-executor", "async-io", - "async-mutex", - "blocking", - "crossbeam-utils", + "async-lock", + "async-process", + "crossbeam-utils 0.8.1", "futures-channel", "futures-core", "futures-io", @@ -307,8 +308,8 @@ dependencies = [ "log", "memchr", "num_cpus", - "once_cell 1.4.1", - "pin-project-lite", + "once_cell", + "pin-project-lite 0.2.4", "pin-utils", "slab", "wasm-bindgen-futures", @@ -316,32 +317,32 @@ dependencies = [ [[package]] name = "async-task" -version = "4.0.2" +version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ab27c1aa62945039e44edaeee1dc23c74cc0c303dd5fe0fb462a184f1c3a518" +checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] -name = "async-tls" -version = "0.10.0" +name = "async-trait" +version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d85a97c4a0ecce878efd3f945f119c78a646d8975340bca0398f9bb05c30cc52" +checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" dependencies = [ - "futures-core", - "futures-io", - "rustls", - "webpki", - "webpki-roots", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "async-trait" -version = "0.1.37" +name = "asynchronous-codec" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caae68055714ff28740f310927e04f2eba76ff580b16fb18ed90073ee71646f7" +checksum = "fb4401f0a3622dad2e0763fa79e0eb328bc70fb7dccfdd645341f00d671247d6" dependencies = [ - "proc-macro2", - "quote", - "syn", + "bytes 1.0.1", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite 0.2.4", ] [[package]] @@ -350,7 +351,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" dependencies = [ - "autocfg 1.0.0", + "autocfg", ] [[package]] @@ -372,41 +373,35 @@ dependencies = [ [[package]] name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" - -[[package]] -name = "autocfg" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +checksum = 
"cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.50" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" +checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" dependencies = [ "addr2line", - "cfg-if", + "cfg-if 1.0.0", "libc", "miniz_oxide", - "object 0.20.0", + "object 0.23.0", "rustc-demangle", ] [[package]] -name = "base58" -version = "0.1.0" +name = "base-x" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" +checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" [[package]] -name = "base64" -version = "0.11.0" +name = "base58" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" +checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" [[package]] name = "base64" @@ -438,10 +433,10 @@ checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" dependencies = [ "bitflags", "cexpr", - "cfg-if", + "cfg-if 0.1.10", "clang-sys", "clap", - "env_logger", + "env_logger 0.7.1", "lazy_static", "lazycell", "log", @@ -451,22 +446,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "which", -] - -[[package]] -name = "bip39" -version = "0.6.0-beta.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7059804e226b3ac116519a252d7f5fb985a5ccc0e93255e036a5f7e7283323f4" -dependencies = [ - "failure", - "hashbrown 0.1.8", - "hmac", - "once_cell 0.1.8", - "pbkdf2", - "rand 0.6.5", - "sha2 0.8.2", + "which 3.1.1", ] [[package]] @@ -477,25 +457,25 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "bitvec" -version = "0.17.4" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" +checksum = "f5011ffc90248764d7005b0e10c7294f5aa1bd87d9dd7248f4ad475b347c294d" dependencies = [ - "either", + "funty", "radium", + "tap", + "wyz", ] [[package]] name = "blake2" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84ce5b6108f8e154604bd4eb76a2f726066c3464d5a552a4229262a18c9bb471" +checksum = "10a5720225ef5daecf08657f23791354e1685a8c91a4c60c7f3d3b2892f978f4" dependencies = [ - "byte-tools", - "byteorder", "crypto-mac 0.8.0", "digest 0.9.0", - "opaque-debug 0.2.3", + "opaque-debug 0.3.0", ] [[package]] @@ -510,26 +490,41 @@ dependencies = [ [[package]] name = "blake2b_simd" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ "arrayref", - "arrayvec 0.5.1", + "arrayvec 0.5.2", "constant_time_eq", ] [[package]] name = "blake2s_simd" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab9e07352b829279624ceb7c64adb4f585dacdb81d35cafae81139ccd617cf44" +checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" dependencies = [ "arrayref", - "arrayvec 0.5.1", + "arrayvec 0.5.2", 
"constant_time_eq", ] +[[package]] +name = "blake3" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9ff35b701f3914bdb8fad3368d822c766ef2858b2583198e41639b936f09d3f" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "cc", + "cfg-if 0.1.10", + "constant_time_eq", + "crypto-mac 0.8.0", + "digest 0.9.0", +] + [[package]] name = "block-buffer" version = "0.7.3" @@ -549,16 +544,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ "block-padding 0.2.1", - "generic-array 0.14.3", -] - -[[package]] -name = "block-cipher" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa136449e765dc7faa244561ccae839c394048667929af599b5d931ebe7b7f10" -dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] @@ -567,7 +553,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] @@ -596,20 +582,20 @@ dependencies = [ "atomic-waker", "fastrand", "futures-lite", - "once_cell 1.4.1", + "once_cell", ] [[package]] name = "bs58" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" +checksum = "473fc6b38233f9af7baa94fb5852dca389e3d95b8e21c8e3719301462c5d9faf" dependencies = [ "lazy_static", "memchr", @@ -628,15 +614,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "099e596ef14349721d9016f6b80dd3419ea1bf289ab9b44df8e4dfd3a005d5d9" [[package]] name = "byte-slice-cast" -version = "0.3.5" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" +checksum = "65c1bf4a04a88c54f589125563643d773f3254b5c38571395e2b591c693bbc81" [[package]] name = "byte-tools" @@ -646,9 +632,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" [[package]] name = "bytes" @@ -667,21 +653,37 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +[[package]] +name = "bytes" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + [[package]] name = "cache-padded" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" +[[package]] +name = "cargo-platform" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0226944a63d1bf35a3b5f948dd7c59e263db83695c9e8bffc4037de02e30f1d7" +dependencies = [ + "serde", +] + [[package]] name = "cargo_metadata" -version = "0.10.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052dbdd9db69a339d5fa9ac87bfe2e1319f709119f0345988a597af82bb1011c" +checksum = "7714a157da7991e23d90686b9524b9e12e0407a108647f52e9328f4b3d51ac7f" dependencies = [ - "semver 0.10.0", + "cargo-platform", + "semver 0.11.0", + "semver-parser 0.10.2", "serde", - "serde_derive", "serde_json", ] @@ -696,9 +698,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.58" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" +checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" dependencies = [ "jobserver", ] @@ -718,26 +720,32 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + [[package]] name = "chacha20" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "086c0f07ac275808b7bf9a39f2fd013aae1498be83632814c8c4e0bd53f2dc58" +checksum = "244fbce0d47e97e8ef2f63b81d5e05882cb518c68531eb33194990d7b7e85845" dependencies = [ - "stream-cipher 0.4.1", + "stream-cipher", "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18b0c90556d8e3fec7cf18d84a2f53d27b21288f2fe481b830fadcf809e48205" +checksum = "9bf18d374d66df0c05cdddd528a7db98f78c28e2519b120855c4f84c5027b1f5" dependencies = [ "aead", "chacha20", "poly1305", - "stream-cipher 0.4.1", + "stream-cipher", "zeroize", ] @@ -757,15 +765,46 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.13" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ "js-sys", + "libc", "num-integer", "num-traits", "time", "wasm-bindgen", + "winapi 0.3.9", +] + +[[package]] +name = "cid" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d88f30b1e74e7063df5711496f3ee6e74a9735d62062242d70cddf77717f18e" +dependencies = [ + "multibase", + "multihash", + "unsigned-varint 0.5.1", +] + +[[package]] +name = "cipher" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "ckb-merkle-mountain-range" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e486fe53bb9f2ca0f58cb60e8679a5354fd6687a839942ef0a75967250289ca6" +dependencies = [ + "cfg-if 0.1.10", ] [[package]] @@ -781,9 +820,9 @@ dependencies = [ [[package]] name = "clap" -version = 
"2.33.1" +version = "2.33.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ "ansi_term 0.11.0", "atty", @@ -803,15 +842,6 @@ dependencies = [ "bitflags", ] -[[package]] -name = "cloudabi" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467" -dependencies = [ - "bitflags", -] - [[package]] name = "concurrent-queue" version = "1.2.2" @@ -827,39 +857,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "wasm-bindgen", ] [[package]] -name = "console_log" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e7871d2947441b0fdd8e2bd1ce2a2f75304f896582c0d572162d48290683c48" -dependencies = [ - "log", - "web-sys", -] - -[[package]] -name = "const-random" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02dc82c12dc2ee6e1ded861cf7d582b46f66f796d1b6c93fa28b911ead95da02" -dependencies = [ - "const-random-macro", - "proc-macro-hack", -] - -[[package]] -name = "const-random-macro" -version = "0.1.11" +name = "const_fn" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc757bbb9544aa296c2ae00c679e81f886b37e28e59097defe0cf524306f6685" -dependencies = [ - "getrandom 0.2.0", - "proc-macro-hack", -] +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "constant_time_eq" @@ -883,46 +889,62 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +[[package]] +name = "cpp_demangle" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44919ecaf6f99e8e737bc239408931c9a01e9a6c74814fee8242dd2506b65390" +dependencies = [ + "cfg-if 1.0.0", + "glob", +] + [[package]] name = "cpuid-bool" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" +[[package]] +name = "cpuid-bool" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" + [[package]] name = "cranelift-bforest" -version = "0.66.0" +version = "0.69.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dcc286b052ee24a1e5a222e7c1125e6010ad35b0f248709b9b3737a8fedcfdf" +checksum = "4066fd63b502d73eb8c5fa6bcab9c7962b05cd580f6b149ee83a8e730d8ce7fb" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.66.0" +version = "0.69.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d9badfe36176cb653506091693bc2bb1970c9bddfcd6ec7fac404f7eaec6f38" +checksum = "1a54e4beb833a3c873a18a8fe735d73d732044004c7539a072c8faa35ccb0c60" dependencies = [ "byteorder", "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", "cranelift-entity", - "gimli 0.21.0", + "gimli", "log", "regalloc", "serde", - "smallvec 1.4.1", + "smallvec 1.6.1", "target-lexicon", "thiserror", ] 
[[package]] name = "cranelift-codegen-meta" -version = "0.66.0" +version = "0.69.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3f460031861e4f4ad510be62b2ae50bba6cc886b598a36f9c0a970feab9598" +checksum = "c54cac7cacb443658d8f0ff36a3545822613fa202c946c0891897843bc933810" dependencies = [ "cranelift-codegen-shared", "cranelift-entity", @@ -930,36 +952,36 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" -version = "0.66.0" +version = "0.69.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76ad12409e922e7697cd0bdc7dc26992f64a77c31880dfe5e3c7722f4710206d" +checksum = "a109760aff76788b2cdaeefad6875a73c2b450be13906524f6c2a81e05b8d83c" [[package]] name = "cranelift-entity" -version = "0.66.0" +version = "0.69.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97cdc58972ea065d107872cfb9079f4c92ade78a8af85aaff519a65b5d13f71" +checksum = "3b044234aa32531f89a08b487630ddc6744696ec04c8123a1ad388de837f5de3" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.66.0" +version = "0.69.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ef419efb4f94ecc02e5d9fbcc910d2bb7f0040e2de570e63a454f883bc891d6" +checksum = "5452b3e4e97538ee5ef2cc071301c69a86c7adf2770916b9d04e9727096abd93" dependencies = [ "cranelift-codegen", "log", - "smallvec 1.4.1", + "smallvec 1.6.1", "target-lexicon", ] [[package]] name = "cranelift-native" -version = "0.66.0" +version = "0.69.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e69d44d59826eef6794066ac2c0f4ad3975f02d97030c60dbc04e3886adf36e" +checksum = "f68035c10b2e80f26cc29c32fa824380877f38483504c2a47b54e7da311caaf3" dependencies = [ "cranelift-codegen", "raw-cpuid", @@ -968,40 +990,42 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.66.0" +version = "0.69.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "979df666b1304624abe99738e9e0e7c7479ee5523ba4b8b237df9ff49996acbb" +checksum = "a530eb9d1c95b3309deb24c3d179d8b0ba5837ed98914a429787c395f614949d" dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", + "itertools 0.9.0", "log", "serde", + "smallvec 1.6.1", "thiserror", - "wasmparser 0.59.0", + "wasmparser", ] [[package]] name = "crc32fast" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] name = "criterion" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70daa7ceec6cf143990669a04c7df13391d55fb27bd4079d252fca774ba244d8" +checksum = "ab327ed7354547cc2ef43cbe20ef68b988e70b4b593cbd66a2a61733123a3d23" dependencies = [ "atty", "cast", "clap", "criterion-plot", "csv", - "itertools 0.9.0", + "itertools 0.10.0", "lazy_static", "num-traits", "oorandom", @@ -1026,30 +1050,65 @@ dependencies = [ "itertools 0.9.0", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils 0.8.1", +] + [[package]] name = "crossbeam-deque" version = "0.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", + "crossbeam-epoch 0.8.2", + "crossbeam-utils 0.7.2", "maybe-uninit", ] +[[package]] +name = "crossbeam-deque" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-epoch 0.9.1", + "crossbeam-utils 0.8.1", +] + [[package]] name = "crossbeam-epoch" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ - "autocfg 1.0.0", - "cfg-if", - "crossbeam-utils", + "autocfg", + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", "lazy_static", "maybe-uninit", - "memoffset", - "scopeguard 1.1.0", + "memoffset 0.5.6", + "scopeguard", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" +dependencies = [ + "cfg-if 1.0.0", + "const_fn", + "crossbeam-utils 0.8.1", + "lazy_static", + "memoffset 0.6.1", + "scopeguard", ] [[package]] @@ -1058,8 +1117,8 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" dependencies = [ - "cfg-if", - "crossbeam-utils", + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", "maybe-uninit", ] @@ -1069,8 +1128,19 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ - "autocfg 1.0.0", - "cfg-if", + "autocfg", + "cfg-if 0.1.10", + "lazy_static", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" +dependencies = [ + "autocfg", + "cfg-if 1.0.0", "lazy_static", ] @@ -1096,15 +1166,15 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.3", - "subtle 2.2.3", + "generic-array 0.14.4", + "subtle 2.4.0", ] [[package]] name = "csv" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00affe7f6ab566df61b4be3ce8cf16bc2576bca0963ceb0955e45d514bf9a279" +checksum = "f9d58633299b24b515ac72a3f869f8b91306a3cec616a602843a383acd6f9e97" dependencies = [ "bstr", "csv-core", @@ -1133,9 +1203,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.15" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39858aa5bac06462d4dd4b9164848eb81ffc4aa5c479746393598fd193afa227" +checksum = "10bcb9d7dcbf7002aaffbb53eac22906b64cdcc127971dcc387d8eb7c95d5560" dependencies = [ "quote", "syn", @@ -1154,41 +1224,61 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "2.1.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5" +checksum = "434e1720189a637d44fe464f4df1e6eb900b4835255b14354497c78af37d9bb8" dependencies = [ "byteorder", "digest 
0.8.1", "rand_core 0.5.1", - "subtle 2.2.3", + "subtle 2.4.0", "zeroize", ] [[package]] name = "curve25519-dalek" -version = "3.0.0" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8492de420e9e60bc9a1d66e2dbb91825390b738a388606600663fc529b4b307" +checksum = "f627126b946c25a4638eec0ea634fc52506dea98db118aae985118ce7c3d723f" dependencies = [ "byteorder", "digest 0.9.0", "rand_core 0.5.1", - "subtle 2.2.3", + "subtle 2.4.0", "zeroize", ] [[package]] name = "data-encoding" -version = "2.2.1" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + +[[package]] +name = "data-encoding-macro" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a94feec3d2ba66c0b6621bca8bc6f68415b1e5c69af3586fdd0af9fd9f29b17" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72aa14c04dfae8dd7d8a2b1cb7ca2152618cd01336dbfe704b8dcbf8d41dbd69" +checksum = "f0f83e699727abca3c56e187945f303389590305ab2f0185ea445aa66e8d5f2a" +dependencies = [ + "data-encoding", + "syn", +] [[package]] name = "derive_more" -version = "0.99.9" +version = "0.99.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "298998b1cf6b5b2c8a7b023dfd45821825ce3ba8a8af55c921a0e734e4653f76" +checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" dependencies = [ "proc-macro2", "quote", @@ -1216,19 +1306,28 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] name = "directories" -version = "2.0.2" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551a778172a450d7fc12e629ca3b0428d00f6afa9a43da1b630d54604e97371c" +checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" dependencies = [ - "cfg-if", "dirs-sys", ] +[[package]] +name = "directories-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" +dependencies = [ + "cfg-if 1.0.0", + "dirs-sys-next", +] + [[package]] name = "dirs-sys" version = "0.3.5" @@ -1236,7 +1335,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ "libc", - "redox_users", + "redox_users 0.3.5", + "winapi 0.3.9", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users 0.4.0", "winapi 0.3.9", ] @@ -1285,15 +1395,15 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c53dc3a653e0f64081026e4bf048d48fec9fce90c66e8326ca7292df0ff2d82" +checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" [[package]] name = "ed25519" -version = "1.0.1" +version = "1.0.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf038a7b6fd7ef78ad3348b63f3a17550877b0e28f8d68bcc94894d1412158bc" +checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef" dependencies = [ "signature", ] @@ -1304,19 +1414,19 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek 3.0.0", + "curve25519-dalek 3.0.2", "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.1", + "sha2 0.9.3", "zeroize", ] [[package]] name = "either" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56b59865bce947ac5958779cfa508f6c3b9497cc762b7e24a12d11ccde2c4f" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "enumflags2" @@ -1345,7 +1455,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ "atty", - "humantime", + "humantime 1.3.0", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "env_logger" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26ecb66b4bdca6c1409b40fb255eefc2bd4f6d135dab3c3124f80ffa2a9661e" +dependencies = [ + "atty", + "humantime 2.1.0", "log", "regex", "termcolor", @@ -1359,18 +1482,18 @@ checksum = "6576a1755ddffd988788025e75bce9e74b018f7cc226198fe931d077911c6d7e" [[package]] name = "erased-serde" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ca8b296792113e1500fd935ae487be6e00ce318952a6880555554824d6ebf38" +checksum = "0465971a8cc1fa2455c8465aaa377131e1f1cf4983280f474a13e68793aa770c" dependencies = [ "serde", ] [[package]] name = "errno" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eab5ee3df98a279d9b316b1af6ac95422127b1290317e6d18c1743c99418b01" +checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe" dependencies = [ "errno-dragonfly", "libc", @@ -1387,92 +1510,19 @@ dependencies = [ "libc", ] -[[package]] -name = "ethbloom" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71a6567e6fd35589fea0c63b94b4cf2e55573e413901bdbe60ab15cf0e25e5df" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak", -] - -[[package]] -name = "ethereum-types" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "473aecff686bd8e7b9db0165cbbb53562376b39bf35b427f0c60446a9e1634b0" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", -] - [[package]] name = "event-listener" version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" -[[package]] -name = "evm" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68224b0aa788720ef0c8a23030a4412a021ed73df069a922bee8f0db9ed617e2" -dependencies = [ - "evm-core", - "evm-gasometer", - "evm-runtime", - "primitive-types", - "rlp", - "serde", - "sha3 0.8.2", -] - -[[package]] -name = "evm-core" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4a040378759577447945c89da1b07d6e33fda32a97a104afe0ec3fa1c382949d" -dependencies = [ - "primitive-types", -] - -[[package]] -name = "evm-gasometer" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bb5bc051afad6bb0735c82b46656bbdfac41917861307a608b1404a546fec42" -dependencies = [ - "evm-core", - "evm-runtime", - "primitive-types", -] - -[[package]] -name = "evm-runtime" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7410f5677a52203d3fca02b0eb8f96f9799f3a45cff82946a8ed28379e6b1b04" -dependencies = [ - "evm-core", - "primitive-types", - "sha3 0.8.2", -] - [[package]] name = "exit-future" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", ] [[package]] @@ -1529,38 +1579,38 @@ dependencies = [ [[package]] name = "file-per-thread-logger" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b3937f028664bd0e13df401ba49a4567ccda587420365823242977f06609ed1" +checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" dependencies = [ - "env_logger", + "env_logger 0.7.1", "log", ] [[package]] name = "finality-grandpa" -version = "0.12.3" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8feb87a63249689640ac9c011742c33139204e3c134293d3054022276869133b" +checksum = "2cd795898c348a8ec9edc66ec9e014031c764d4c88cc26d09b492cd93eb41339" dependencies = [ "either", - "futures 0.3.5", - "futures-timer 2.0.2", + "futures 0.3.12", + "futures-timer 3.0.2", "log", "num-traits", "parity-scale-codec", - "parking_lot 0.9.0", - "rand 0.6.5", + "parking_lot 0.11.1", + "rand 0.8.3", ] [[package]] name = "fixed-hash" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11498d382790b7a8f2fd211780bec78619bba81cdad3a283997c0c41f836759c" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", - "rand 0.7.3", + "rand 0.8.3", "rustc-hex", "static_assertions", ] @@ -1573,11 +1623,11 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = "1.0.16" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e" +checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crc32fast", "libc", "libz-sys", @@ -1592,21 +1642,32 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", ] +[[package]] +name = "form_urlencoded" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +dependencies = [ + "matches", + "percent-encoding 2.1.0", +] + [[package]] name = "frame-benchmarking" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", "hex-literal", "linregress", "parity-scale-codec", - "paste 0.1.18", + "paste 1.0.4", + "serde", "sp-api", "sp-io", "sp-runtime", @@ -1617,8 +1678,9 @@ dependencies = [ [[package]] name = 
"frame-benchmarking-cli" -version = "2.0.0" +version = "3.0.0" dependencies = [ + "Inflector", "chrono", "frame-benchmarking", "handlebars", @@ -1638,7 +1700,7 @@ dependencies = [ [[package]] name = "frame-executive" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", @@ -1658,7 +1720,7 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "12.0.0" +version = "13.0.0" dependencies = [ "parity-scale-codec", "serde", @@ -1668,7 +1730,7 @@ dependencies = [ [[package]] name = "frame-support" -version = "2.0.0" +version = "3.0.0" dependencies = [ "bitflags", "frame-metadata", @@ -1676,19 +1738,20 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "log", - "once_cell 1.4.1", + "once_cell", "parity-scale-codec", "parity-util-mem", - "paste 0.1.18", + "paste 1.0.4", "pretty_assertions", "serde", - "smallvec 1.4.1", + "smallvec 1.6.1", "sp-api", "sp-arithmetic", "sp-core", "sp-inherents", "sp-io", "sp-runtime", + "sp-staking", "sp-state-machine", "sp-std", "sp-tracing", @@ -1697,8 +1760,9 @@ dependencies = [ [[package]] name = "frame-support-procedural" -version = "2.0.0" +version = "3.0.0" dependencies = [ + "Inflector", "frame-support-procedural-tools", "proc-macro2", "quote", @@ -1707,7 +1771,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -1718,7 +1782,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" -version = "2.0.0" +version = "3.0.0" dependencies = [ "proc-macro2", "quote", @@ -1727,10 +1791,11 @@ dependencies = [ [[package]] name = "frame-support-test" -version = "2.0.0" +version = "2.0.1" dependencies = [ "frame-metadata", "frame-support", + "frame-system", "parity-scale-codec", "pretty_assertions", "rustversion", @@ -1746,7 +1811,7 @@ dependencies = [ [[package]] name = "frame-system" -version = "2.0.0" +version = "3.0.0" dependencies = [ "criterion", "frame-support", @@ -1764,7 +1829,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -1779,7 +1844,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", "sp-api", @@ -1787,31 +1852,21 @@ dependencies = [ [[package]] name = "fs-swap" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "921d332c89b3b61a826de38c61ee5b6e02c56806cade1b0e5d81bd71f57a71bb" +checksum = "5839fda247e24ca4919c87c71dd5ca658f1f39e4f06829f80e3f15c3bafcfc2c" dependencies = [ "lazy_static", "libc", - "libloading", - "winapi 0.3.9", -] - -[[package]] -name = "fs2" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" -dependencies = [ - "libc", + "libloading", "winapi 0.3.9", ] [[package]] name = "fs_extra" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674" +checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" [[package]] name = "fuchsia-cprng" @@ -1835,17 +1890,23 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + [[package]] name = "futures" -version = "0.1.29" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" +checksum = "4c7e4c2612746b0df8fed4ce0c69156021b704c9aefa360311c04e6e9e002eed" [[package]] name = "futures" -version = "0.3.5" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" +checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" dependencies = [ "futures-channel", "futures-core", @@ -1858,34 +1919,19 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.5" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" dependencies = [ "futures-core", "futures-sink", ] -[[package]] -name = "futures-channel-preview" -version = "0.3.0-alpha.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5e5f4df964fa9c1c2f8bddeb5c3611631cacd93baf810fc8bb2fb4b495c263a" -dependencies = [ - "futures-core-preview", -] - [[package]] name = "futures-core" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" - -[[package]] -name = "futures-core-preview" -version = "0.3.0-alpha.19" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35b6263fb1ef523c3056565fa67b1d16f0a8604ff12b11b08c25f28a734c60a" +checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" [[package]] name = "futures-cpupool" @@ -1893,7 +1939,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "num_cpus", ] @@ -1903,21 +1949,21 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" dependencies = [ - "futures 0.1.29", - "futures 0.3.5", + "futures 0.1.30", + "futures 0.3.12", "lazy_static", "log", "parking_lot 0.9.0", - "pin-project 0.4.22", + "pin-project 0.4.27", "serde", "serde_json", ] [[package]] name = "futures-executor" -version = "0.3.5" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" +checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" dependencies = [ "futures-core", "futures-task", @@ -1927,30 +1973,30 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.5" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" +checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" [[package]] name = "futures-lite" -version = "1.11.1" +version = "1.11.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "381a7ad57b1bad34693f63f6f377e1abded7a9c85c9d3eb6771e11c60aaadab9" +checksum = "b4481d0cd0de1d204a4fa55e7d45f07b1d958abcb06714b3446438e2eff695fb" dependencies = [ "fastrand", "futures-core", "futures-io", "memchr", "parking", - "pin-project-lite", + "pin-project-lite 0.2.4", "waker-fn", ] [[package]] name = "futures-macro" -version = "0.3.5" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" +checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -1958,19 +2004,30 @@ dependencies = [ "syn", ] +[[package]] +name = "futures-rustls" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a1387e07917c711fb4ee4f48ea0adb04a3c9739e53ef85bf43ae1edc2937a8b" +dependencies = [ + "futures-io", + "rustls 0.19.0", + "webpki", +] + [[package]] name = "futures-sink" -version = "0.3.5" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" +checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" [[package]] name = "futures-task" -version = "0.3.5" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" +checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" dependencies = [ - "once_cell 1.4.1", + "once_cell", ] [[package]] @@ -1991,11 +2048,11 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.5" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" +checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "futures-channel", "futures-core", "futures-io", @@ -2003,37 +2060,13 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 0.4.22", + "pin-project-lite 0.2.4", "pin-utils", "proc-macro-hack", "proc-macro-nested", "slab", ] -[[package]] -name = "futures-util-preview" -version = "0.3.0-alpha.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce968633c17e5f97936bd2797b6e38fb56cf16a7422319f7ec2e30d3c470e8d" -dependencies = [ - "futures-channel-preview", - "futures-core-preview", - "pin-utils", - "slab", -] - -[[package]] -name = "futures_codec" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b" -dependencies = [ - "bytes 0.5.6", - "futures 0.3.5", - "memchr", - "pin-project 0.4.22", -] - [[package]] name = "gcc" version = "0.3.55" @@ -2051,9 +2084,18 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.3" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ed1e761351b56f54eb9dcd0cfaca9fd0daecf93918e1cfc01c8a3d26ee7adcd" +dependencies = [ + "typenum", +] + +[[package]] +name = "generic-array" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60fb4bb6bba52f78a471264d9a3b7d026cc0af47b22cd2cffbc0b787ca003e63" +checksum = 
"501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" dependencies = [ "typenum", "version_check", @@ -2061,53 +2103,51 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", + "js-sys", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", + "js-sys", "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] name = "ghash" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6e27f0689a6e15944bdce7e45425efb87eaa8ab0c6e87f11d0987a9133e2531" +checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" dependencies = [ + "opaque-debug 0.3.0", "polyval", ] [[package]] name = "gimli" -version = "0.21.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" dependencies = [ "fallible-iterator", "indexmap", "stable_deref_trait", ] -[[package]] -name = "gimli" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" - [[package]] name = "glob" version = "0.3.0" @@ -2116,9 +2156,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "globset" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ad1da430bd7281dde2576f44c84cc3f0f7b475e7202cd503042dff01a8c8120" +checksum = "c152169ef1e421390738366d2f796655fec62621dabbd0fd476f905934061e4a" dependencies = [ "aho-corasick", "bstr", @@ -2149,7 +2189,7 @@ dependencies = [ "byteorder", "bytes 0.4.12", "fnv", - "futures 0.1.29", + "futures 0.1.30", "http 0.1.21", "indexmap", "log", @@ -2160,34 +2200,35 @@ dependencies = [ [[package]] name = "h2" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993f9e0baeed60001cf565546b0d3dbe6a6ad23f2bd31644a133c641eccf6d53" +checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ "bytes 0.5.6", "fnv", "futures-core", "futures-sink", "futures-util", - "http 0.2.1", + "http 0.2.3", "indexmap", "slab", - "tokio 0.2.22", + "tokio 0.2.25", "tokio-util", "tracing", + "tracing-futures", ] [[package]] name = "half" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36fab90f82edc3c747f9d438e06cf0a491055896f2a279638bb5beed6c40177" +checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3" [[package]] name = "handlebars" -version = "3.5.0" +version = "3.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dcd1b5399b9884f9ae18b5d4105d180720c8f602aeb73d3ceae9d6b1d13a5fa7" +checksum = "964d0e99a61fe9b1b347389b77ebf8b7e1587b70293676aaca7d27e59b9073b2" dependencies = [ "log", "pest", @@ -2214,48 +2255,27 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da" -dependencies = [ - "byteorder", - "scopeguard 0.3.3", -] - -[[package]] -name = "hashbrown" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" -dependencies = [ - "ahash 0.2.19", - "autocfg 0.1.7", -] - -[[package]] -name = "hashbrown" -version = "0.8.1" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34f595585f103464d8d2f6e9864682d74c1601fed5e07d62b1c9058dba8246fb" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" dependencies = [ - "ahash 0.3.8", - "autocfg 1.0.0", + "ahash", ] [[package]] name = "heck" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" dependencies = [ "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.15" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ "libc", ] @@ -2288,6 +2308,16 @@ dependencies = [ "digest 0.8.1", ] +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] + [[package]] name = "hmac-drbg" version = "0.2.0" @@ -2296,14 +2326,14 @@ checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" dependencies = [ "digest 0.8.1", "generic-array 0.12.3", - "hmac", + "hmac 0.7.1", ] [[package]] name = "honggfuzz" -version = "0.5.49" +version = "0.5.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "832bac18a82ec7d6c21887daa8616b238fe90d5d5e762d0d4b9372cdaa9e097f" +checksum = "ead88897bcad1c396806d6ccba260a0363e11da997472e9e19ab9889969083a2" dependencies = [ "arbitrary", "lazy_static", @@ -2323,11 +2353,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "fnv", "itoa", ] @@ -2339,7 +2369,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "http 0.1.21", "tokio-buf", ] @@ -2351,7 +2381,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ "bytes 0.5.6", - "http 0.2.1", + "http 0.2.3", ] [[package]] @@ -2360,6 
+2390,12 @@ version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +[[package]] +name = "httpdate" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" + [[package]] name = "humantime" version = "1.3.0" @@ -2369,6 +2405,12 @@ dependencies = [ "quick-error 1.2.3", ] +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "hyper" version = "0.12.35" @@ -2376,7 +2418,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "futures-cpupool", "h2 0.1.26", "http 0.1.21", @@ -2390,7 +2432,7 @@ dependencies = [ "time", "tokio 0.1.22", "tokio-buf", - "tokio-executor 0.1.10", + "tokio-executor", "tokio-io", "tokio-reactor", "tokio-tcp", @@ -2401,23 +2443,23 @@ dependencies = [ [[package]] name = "hyper" -version = "0.13.7" +version = "0.13.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" +checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" dependencies = [ "bytes 0.5.6", "futures-channel", "futures-core", "futures-util", - "h2 0.2.6", - "http 0.2.1", + "h2 0.2.7", + "http 0.2.3", "http-body 0.3.1", "httparse", + "httpdate", "itoa", - "pin-project 0.4.22", + "pin-project 1.0.4", "socket2", - "time", - "tokio 0.2.22", + "tokio 0.2.25", "tower-service", "tracing", "want 0.3.0", @@ -2432,11 +2474,11 @@ dependencies = [ "bytes 0.5.6", "ct-logs", "futures-util", - "hyper 0.13.7", + "hyper 0.13.9", "log", - "rustls", + "rustls 0.18.1", "rustls-native-certs", - "tokio 0.2.22", + "tokio 0.2.25", "tokio-rustls", "webpki", ] @@ -2465,9 +2507,9 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12906406f12abf5569643c46b29aec78313dc1537b17dd5c5250169790c4db9" +checksum = "28538916eb3f3976311f5dfbe67b5362d0add1293d0a9cad17debf86f8e3aa48" dependencies = [ "if-addrs-sys", "libc", @@ -2476,30 +2518,37 @@ dependencies = [ [[package]] name = "if-addrs-sys" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e2556f16544202bcfe0aa5d20a01a6b815f736b136b3ad76dc547ee6b5bb1df" +checksum = "de74b9dd780476e837e5eb5ab7c88b49ed304126e412030a0adba99c8efe79ea" dependencies = [ "cc", "libc", ] [[package]] -name = "impl-codec" -version = "0.4.2" +name = "if-watch" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" +checksum = "97b8538953a3f0d0d3868f0a706eb4273535e10d72acb5c82c1c23ae48835c85" dependencies = [ - "parity-scale-codec", + "async-io", + "futures 0.3.12", + "futures-lite", + "if-addrs", + "ipnet", + "libc", + "log", + "winapi 0.3.9", ] [[package]] -name = "impl-rlp" -version = "0.2.1" +name = "impl-codec" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f7a72f11830b52333f36e3b09a288333888bf54380fd0ac0790a3c31ab0f3c5" +checksum = 
"df170efa359aebdd5cb7fe78edcc67107748e4737bdca8a8fb40d15ea7a877ed" dependencies = [ - "rlp", + "parity-scale-codec", ] [[package]] @@ -2513,9 +2562,9 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.1.3" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" +checksum = "d5dacb10c5b3bb92d46ba347505a9041e676bb20ad220101326bffb0c93031ee" dependencies = [ "proc-macro2", "quote", @@ -2524,26 +2573,35 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.5.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b88cd59ee5f71fea89a62248fc8f387d44400cefe05ef548466d61ced9029a7" +checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ - "autocfg 1.0.0", - "hashbrown 0.8.1", + "autocfg", + "hashbrown", "serde", ] [[package]] name = "instant" -version = "0.1.6" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b141fdc7836c525d4d594027d318c84161ca17aaf8113ab1f81ab93ae897485" +checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +dependencies = [ + "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", +] [[package]] name = "integer-sqrt" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f65877bf7d44897a473350b1046277941cee20b263397e90869c50b6e766088b" +checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" +dependencies = [ + "num-traits", +] [[package]] name = "intervalier" @@ -2551,7 +2609,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "futures-timer 2.0.2", ] @@ -2578,27 +2636,27 @@ checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" [[package]] name = "itertools" -version = "0.8.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" dependencies = [ "either", ] [[package]] name = "itertools" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" dependencies = [ "either", ] [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jobserver" @@ -2611,21 +2669,21 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.39" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa5a448de267e7358beaf4a5d849518fe9a0c13fce7afd44b06e68550e5562a7" +checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" dependencies = [ "wasm-bindgen", ] [[package]] name = "jsonrpc-client-transports" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c6f7b1cdf66312002e15682a24430728bd13036c641163c016bc53fb686a7c2d" +checksum = "489b9c612e60c766f751ab40fcb43cbb55a1e10bb44a9b4307ed510ca598cbd7" dependencies = [ "failure", - "futures 0.1.29", + "futures 0.1.30", "hyper 0.12.35", "jsonrpc-core", "jsonrpc-pubsub", @@ -2637,11 +2695,11 @@ dependencies = [ [[package]] name = "jsonrpc-core" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b12567a31d48588a65b6cf870081e6ba1d7b2ae353977cb9820d512e69c70" +checksum = "0745a6379e3edc893c84ec203589790774e4247420033e71a76d3ab4687991fa" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "log", "serde", "serde_derive", @@ -2650,18 +2708,18 @@ dependencies = [ [[package]] name = "jsonrpc-core-client" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d175ca0cf77439b5495612bf216c650807d252d665b4b70ab2eebd895a88fac1" +checksum = "6f764902d7b891344a0acb65625f32f6f7c6db006952143bd650209fbe7d94db" dependencies = [ "jsonrpc-client-transports", ] [[package]] name = "jsonrpc-derive" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2cc6ea7f785232d9ca8786a44e9fa698f92149dcdc1acc4aa1fc69c4993d79e" +checksum = "99a847f9ec7bb52149b2786a17c9cb260d6effc6b8eeb8c16b343a487a7563a3" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2671,9 +2729,9 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9996b26c0c7a59626d0ed6c5ec8bf06218e62ce1474bd2849f9b9fd38a0158c0" +checksum = "4fb5c4513b7b542f42da107942b7b759f27120b5cc894729f88254b28dff44b7" dependencies = [ "hyper 0.12.35", "jsonrpc-core", @@ -2686,9 +2744,9 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e8f2278fb2b277175b6e21b23e7ecf30e78daff5ee301d0a2a411d9a821a0a" +checksum = "cf50e53e4eea8f421a7316c5f63e395f7bc7c4e786a6dc54d76fab6ff7aa7ce7" dependencies = [ "jsonrpc-core", "jsonrpc-server-utils", @@ -2700,9 +2758,9 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f389c5cd1f3db258a99296892c21047e21ae73ff4c0e2d39650ea86fe994b4c7" +checksum = "639558e0604013be9787ae52f798506ae42bf4220fe587bdc5625871cc8b9c77" dependencies = [ "jsonrpc-core", "log", @@ -2713,9 +2771,9 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c623e1895d0d9110cb0ea7736cfff13191ff52335ad33b21bd5c775ea98b27af" +checksum = "72f1f3990650c033bd8f6bd46deac76d990f9bbfb5f8dc8c4767bf0a00392176" dependencies = [ "bytes 0.4.12", "globset", @@ -2729,9 +2787,9 @@ dependencies = [ [[package]] name = "jsonrpc-ws-server" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436a92034d0137ab3e3c64a7a6350b428f31cb4d7d1a89f284bcdbcd98a7bc56" +checksum = "6596fe75209b73a2a75ebe1dce4e60e03b88a2b25e8807b667597f6315150d22" dependencies = [ "jsonrpc-core", "jsonrpc-server-utils", @@ -2779,30 +2837,30 @@ dependencies = [ [[package]] name = "kvdb" -version = "0.7.0" +version = "0.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0315ef2f688e33844400b31f11c263f2b3dc21d8b9355c6891c5f185fae43f9a" +checksum = "8891bd853eff90e33024195d79d578dc984c82f9e0715fcd2b525a0c19d52811" dependencies = [ "parity-util-mem", - "smallvec 1.4.1", + "smallvec 1.6.1", ] [[package]] name = "kvdb-memorydb" -version = "0.7.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73de822b260a3bdfb889dbbb65bb2d473eee2253973d6fa4a5d149a2a4a7c66e" +checksum = "30a0da8e08caf08d384a620ec19bb6c9b85c84137248e202617fb91881f25912" dependencies = [ "kvdb", "parity-util-mem", - "parking_lot 0.10.2", + "parking_lot 0.11.1", ] [[package]] name = "kvdb-rocksdb" -version = "0.9.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44947dd392f09475af614d740fe0320b66d01cb5b977f664bbbb5e45a70ea4c1" +checksum = "34446c373ccc494c2124439281c198c7636ccdc2752c06722bbffd56d459c1e4" dependencies = [ "fs-swap", "kvdb", @@ -2810,25 +2868,26 @@ dependencies = [ "num_cpus", "owning_ref", "parity-util-mem", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "regex", "rocksdb", - "smallvec 1.4.1", + "smallvec 1.6.1", ] [[package]] name = "kvdb-web" -version = "0.7.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2701a1369d6ea4f1b9f606db46e5e2a4a8e47f22530a07823d653f85ab1f6c34" +checksum = "eb1e98ba343d0b35f9009a8844cd2b87fa3192f7e79033ac05b00aeae0f3b0b5" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "js-sys", "kvdb", "kvdb-memorydb", "log", "parity-util-mem", - "send_wrapper 0.3.0", + "parking_lot 0.11.1", + "send_wrapper 0.5.0", "wasm-bindgen", "web-sys", ] @@ -2841,9 +2900,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "lazycell" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "leb128" @@ -2853,9 +2912,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.79" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743" +checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff" [[package]] name = "libloading" @@ -2875,13 +2934,13 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.29.1" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021f703bfef6e3da78ef9828c8a244d639b8d57eedf58360922aca5ff69dfdcd" +checksum = "d5133112ce42be9482f6a87be92a605dd6bbc9e93c297aee77d172ff06908f3a" dependencies = [ "atomic", - "bytes 0.5.6", - "futures 0.3.5", + "bytes 1.0.1", + "futures 0.3.12", "lazy_static", "libp2p-core", "libp2p-core-derive", @@ -2904,26 +2963,25 @@ dependencies = [ "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "multihash", "parity-multiaddr", - "parking_lot 0.11.0", - "pin-project 1.0.1", - "smallvec 1.4.1", + "parking_lot 0.11.1", + "pin-project 1.0.4", + "smallvec 1.6.1", "wasm-timer", ] [[package]] name = "libp2p-core" -version = "0.23.1" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3960524389409633550567e8a9e0684d25a33f4f8408887ff897dd9fdfbdb771" +checksum = "dad04d3cef6c1df366a6ab58c9cf8b06497699e335d83ac2174783946ff847d6" dependencies = [ "asn1_der", "bs58", "ed25519-dalek", "either", "fnv", - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", "lazy_static", "libsecp256k1", @@ -2931,26 +2989,26 @@ dependencies = [ "multihash", "multistream-select", "parity-multiaddr", - "parking_lot 0.11.0", - "pin-project 1.0.1", + "parking_lot 0.11.1", + "pin-project 1.0.4", "prost", "prost-build", "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.9.1", - "smallvec 1.4.1", + "sha2 0.9.3", + "smallvec 1.6.1", "thiserror", - "unsigned-varint 0.5.1", + "unsigned-varint 0.6.0", "void", "zeroize", ] [[package]] name = "libp2p-core-derive" -version = "0.20.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f753d9324cd3ec14bf04b8a8cd0d269c87f294153d6bf2a84497a63a5ad22213" +checksum = "f4bc40943156e42138d22ed3c57ff0e1a147237742715937622a99b10fbe0156" dependencies = [ "quote", "syn", @@ -2958,182 +3016,180 @@ dependencies = [ [[package]] name = "libp2p-deflate" -version = "0.23.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567962c5c5f8a1282979441300e1739ba939024010757c3dbfab4d462189df77" +checksum = "6d42eed63305f0420736fa487f9acef720c4528bd7852a6a760f5ccde4813345" dependencies = [ "flate2", - "futures 0.3.5", + "futures 0.3.12", "libp2p-core", ] [[package]] name = "libp2p-dns" -version = "0.23.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436280f5fe21a58fcaff82c2606945579241f32bc0eaf2d39321aa4624a66e7f" +checksum = "5153b6db68fd4baa3b304e377db744dd8fea8ff4e4504509ee636abcde88d3e3" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "libp2p-core", "log", ] [[package]] name = "libp2p-floodsub" -version = "0.23.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc175613c5915332fd6458895407ec242ea055ae3b107a586626d5e3349350a" +checksum = "b3c63dfa06581b24b1d12bf9815b43689a784424be217d6545c800c7c75a207f" dependencies = [ "cuckoofilter", "fnv", - "futures 0.3.5", + "futures 0.3.12", "libp2p-core", "libp2p-swarm", "log", "prost", "prost-build", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.6.1", ] [[package]] name = "libp2p-gossipsub" -version = "0.23.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d500ad89ba14de4d18bebdff61a0ce3e769f1c5c5a95026c5da90187e5fff5c9" +checksum = "12451ba9493e87c91baf2a6dffce9ddf1fbc807a0861532d7cf477954f8ebbee" dependencies = [ + "asynchronous-codec", "base64 0.13.0", "byteorder", - "bytes 0.5.6", + "bytes 1.0.1", "fnv", - "futures 0.3.5", - "futures_codec", + "futures 0.3.12", "hex_fmt", "libp2p-core", "libp2p-swarm", "log", - "lru_time_cache", "prost", "prost-build", "rand 0.7.3", - "sha2 0.9.1", - "smallvec 1.4.1", - "unsigned-varint 0.5.1", + "regex", + "sha2 0.9.3", + "smallvec 1.6.1", + "unsigned-varint 0.6.0", "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.23.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b90b350e37f398b73d778bd94422f4e6a3afa2c1582742ce2446b8a0dba787" +checksum = "b40fb36a059b7a8cce1514bd8b546fa612e006c9937caa7f5950cb20021fe91e" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "libp2p-core", "libp2p-swarm", "log", "prost", "prost-build", - "smallvec 1.4.1", + "smallvec 1.6.1", 
"wasm-timer", ] [[package]] name = "libp2p-kad" -version = "0.24.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb78341f114bf686d5fe50b33ff1a804d88fb326c0d39ee1c22db4346b21fc27" +checksum = "456f5de8e283d7800ca848b9b9a4e2a578b790bd8ae582b885e831353cf0e5df" dependencies = [ - "arrayvec 0.5.1", - "bytes 0.5.6", + "arrayvec 0.5.2", + "asynchronous-codec", + "bytes 1.0.1", "either", "fnv", - "futures 0.3.5", - "futures_codec", + "futures 0.3.12", "libp2p-core", "libp2p-swarm", "log", - "multihash", "prost", "prost-build", "rand 0.7.3", - "sha2 0.9.1", - "smallvec 1.4.1", + "sha2 0.9.3", + "smallvec 1.6.1", "uint", - "unsigned-varint 0.5.1", + "unsigned-varint 0.6.0", "void", "wasm-timer", ] [[package]] name = "libp2p-mdns" -version = "0.23.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b575514fce0a3ccbd065d6aa377bd4d5102001b05c1a22a5eee49c450254ef0f" +checksum = "b974db63233fc0e199f4ede7794294aae285c96f4b6010f853eac4099ef08590" dependencies = [ - "async-std", + "async-io", "data-encoding", "dns-parser", - "either", - "futures 0.3.5", + "futures 0.3.12", + "if-watch", "lazy_static", "libp2p-core", "libp2p-swarm", "log", - "net2", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.6.1", + "socket2", "void", - "wasm-timer", ] [[package]] name = "libp2p-mplex" -version = "0.23.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "696c8ee8b42496690b88b0de84a96387caf6e09880bcc8e794bb88afa054e995" +checksum = "2705dc94b01ab9e3779b42a09bbf3712e637ed213e875c30face247291a85af0" dependencies = [ - "bytes 0.5.6", - "futures 0.3.5", - "futures_codec", + "asynchronous-codec", + "bytes 1.0.1", + "futures 0.3.12", "libp2p-core", "log", "nohash-hasher", - "parking_lot 0.11.0", + "parking_lot 0.11.1", "rand 0.7.3", - "smallvec 1.4.1", - "unsigned-varint 0.5.1", + "smallvec 1.6.1", + "unsigned-varint 0.6.0", ] [[package]] name = "libp2p-noise" -version = "0.25.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93c77142e3e5b18fefa7d267305c777c9cbe9b2232ec489979390100bebcc1e6" +checksum = "4aca322b52a0c5136142a7c3971446fb1e9964923a526c9cc6ef3b7c94e57778" dependencies = [ - "bytes 0.5.6", - "curve25519-dalek 3.0.0", - "futures 0.3.5", + "bytes 1.0.1", + "curve25519-dalek 3.0.2", + "futures 0.3.12", "lazy_static", "libp2p-core", "log", "prost", "prost-build", "rand 0.7.3", - "sha2 0.9.1", + "sha2 0.9.3", "snow", "static_assertions", - "x25519-dalek 1.1.0", + "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-ping" -version = "0.23.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7257135609e8877f4d286935cbe1e572b2018946881c3e7f63054577074a7ee7" +checksum = "6f3813276d0708c8db0f500d8beda1bda9ad955723b9cb272c41f4727256f73c" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "libp2p-core", "libp2p-swarm", "log", @@ -3144,82 +3200,83 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.23.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c88d59ba3e710a8c8e0535cb4a52e9e46534924cbbea4691f8c3aaad17b58c61" +checksum = "48e8c1ec305c9949351925cdc7196b9570f4330477f5e47fbf5bb340b57e26ed" dependencies = [ - "bytes 0.5.6", - "futures 0.3.5", - "futures_codec", + "asynchronous-codec", + "bytes 1.0.1", + "futures 0.3.12", "libp2p-core", "log", "prost", "prost-build", - "unsigned-varint 0.5.1", + 
"unsigned-varint 0.6.0", "void", ] [[package]] name = "libp2p-pnet" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b3c2d5d26a9500e959a0e19743897239a6c4be78dadf99b70414301a70c006" +checksum = "6ce3374f3b28162db9d3442c9347c4f14cb01e8290052615c7d341d40eae0599" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "log", - "pin-project 0.4.22", + "pin-project 1.0.4", "rand 0.7.3", "salsa20", - "sha3 0.9.1", + "sha3", ] [[package]] name = "libp2p-request-response" -version = "0.4.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02ba1aa5727ccc118c09ba5111480873f2fe5608cb304e258fd12c173ecf27c9" +checksum = "d37637a4b33b5390322ccc068a33897d0aa541daf4fec99f6a7efbf37295346e" dependencies = [ "async-trait", - "bytes 0.5.6", - "futures 0.3.5", + "bytes 1.0.1", + "futures 0.3.12", "libp2p-core", "libp2p-swarm", "log", - "lru 0.6.0", + "lru", "minicbor", "rand 0.7.3", - "smallvec 1.4.1", - "unsigned-varint 0.5.1", + "smallvec 1.6.1", + "unsigned-varint 0.6.0", "wasm-timer", ] [[package]] name = "libp2p-swarm" -version = "0.23.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffa6fa33b16956b8a58afbfebe1406866011a1ab8960765bd36868952d7be6a1" +checksum = "d4f89ebb4d8953bda12623e9871959fe728dea3bf6eae0421dc9c42dc821e488" dependencies = [ "either", - "futures 0.3.5", + "futures 0.3.12", "libp2p-core", "log", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.6.1", "void", "wasm-timer", ] [[package]] name = "libp2p-tcp" -version = "0.23.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0b6f4ef48d9493607fae069deecce0579320a1f3de6cb056770b151018a9a5" +checksum = "3dbd3d7076a478ac5a6aca55e74bdc250ac539b95de09b9d09915e0b8d01a6b2" dependencies = [ - "async-std", - "futures 0.3.5", + "async-io", + "futures 0.3.12", "futures-timer 3.0.2", - "if-addrs", + "if-watch", "ipnet", + "libc", "libp2p-core", "log", "socket2", @@ -3227,23 +3284,23 @@ dependencies = [ [[package]] name = "libp2p-uds" -version = "0.23.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945bed3c989a1b290b5a0d4e8fa6e44e01840efb9a5ab3f0d3d174f0e451ac0e" +checksum = "80ac51ce419f60be966e02103c17f67ff5dc4422ba83ba54d251d6c62a4ed487" dependencies = [ "async-std", - "futures 0.3.5", + "futures 0.3.12", "libp2p-core", "log", ] [[package]] name = "libp2p-wasm-ext" -version = "0.23.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66518a4455e15c283637b4d7b579aef928b75a3fc6c50a41e7e6b9fa86672ca0" +checksum = "6149c46cb76935c80bc8be6ec6e3ebd5f5e1679765a255fb34331d54610f15dd" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "js-sys", "libp2p-core", "parity-send-wrapper", @@ -3253,33 +3310,31 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.24.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc561870477523245efaaea1b6b743c70115f10c670e62bcbbe4d3153be5f0c" +checksum = "d3b1c6a3431045da8b925ed83384e4c5163e14b990572307fca9c507435d4d22" dependencies = [ - "async-tls", "either", - "futures 0.3.5", + "futures 0.3.12", + "futures-rustls", "libp2p-core", "log", "quicksink", - "rustls", "rw-stream-sink", "soketto", - "url 2.1.1", - "webpki", + "url 2.2.0", "webpki-roots", ] [[package]] name = "libp2p-yamux" -version = "0.26.0" +version = "0.30.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "07c0c9b6ef7a168c2ae854170b0b6b77550599afe06cc3ac390eb45c5d9c7110" +checksum = "490b8b27fc40fe35212df1b6a3d14bffaa4117cbff956fdc2892168a371102ad" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "libp2p-core", - "parking_lot 0.11.0", + "parking_lot 0.11.1", "thiserror", "yamux", ] @@ -3308,27 +3363,26 @@ dependencies = [ "hmac-drbg", "rand 0.7.3", "sha2 0.8.2", - "subtle 2.2.3", + "subtle 2.4.0", "typenum", ] [[package]] name = "libz-sys" -version = "1.0.25" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe" +checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" dependencies = [ "cc", - "libc", "pkg-config", "vcpkg", ] [[package]] name = "linked-hash-map" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "linked_hash_set" @@ -3341,20 +3395,19 @@ dependencies = [ [[package]] name = "linregress" -version = "0.1.7" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9290cf6f928576eeb9c096c6fad9d8d452a0a1a70a2bbffa6e36064eedc0aac9" +checksum = "0d0ad4b5cc8385a881c561fac3501353d63d2a2b7a357b5064d71815c9a92724" dependencies = [ - "failure", "nalgebra", "statrs", ] [[package]] name = "lite-json" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c73e713a23ac6e12074c9e96ef2dfb770921e0cb9244c093bd38424209e0e523" +checksum = "0460d985423a026b4d9b828a7c6eed1bcf606f476322f3f9b507529686a61715" dependencies = [ "lite-parser", ] @@ -3368,75 +3421,43 @@ dependencies = [ "paste 0.1.18", ] -[[package]] -name = "lock_api" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" -dependencies = [ - "scopeguard 0.3.3", -] - [[package]] name = "lock_api" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" -dependencies = [ - "scopeguard 1.1.0", -] - -[[package]] -name = "lock_api" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" -dependencies = [ - "scopeguard 1.1.0", -] - -[[package]] -name = "log" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ - "cfg-if", + "scopeguard", ] [[package]] -name = "lru" -version = "0.4.3" +name = "lock_api" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0609345ddee5badacf857d4f547e0e5a2e987db77085c24cd887f73573a04237" +checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" dependencies = [ - "hashbrown 0.6.3", + "scopeguard", ] [[package]] -name = "lru" -version = "0.5.3" +name = "log" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"35c456c123957de3a220cd03786e0d86aa542a88b46029973b542f426da6ef34" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "hashbrown 0.6.3", + "cfg-if 1.0.0", + "value-bag", ] [[package]] name = "lru" -version = "0.6.0" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111b945ac72ec09eb7bc62a0fbdc3cc6e80555a7245f52a69d3921a75b53b153" +checksum = "3aae342b73d57ad0b8b364bd12584819f2c1fe9114285dfcf8b0722607671635" dependencies = [ - "hashbrown 0.8.1", + "hashbrown", ] -[[package]] -name = "lru_time_cache" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebac060fafad3adedd0c66a80741a92ff4bc8e94a273df2ba3770ab206f2e29a" - [[package]] name = "mach" version = "0.3.2" @@ -3469,9 +3490,9 @@ checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" [[package]] name = "matrixmultiply" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4f7ec66360130972f34830bfad9ef05c6610a43938a467bcc9ab9369ab3478f" +checksum = "916806ba0031cd542105d916a97c8572e1fa6dd79c9c51e7eb43a09ec2dd84c1" dependencies = [ "rawpointer", ] @@ -3484,9 +3505,9 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.3.3" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "memmap" @@ -3498,23 +3519,41 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "memmap2" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e73be3b7d04a0123e933fea1d50d126cc7196bbc0362c0ce426694f777194eee" +dependencies = [ + "libc", +] + +[[package]] +name = "memoffset" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +dependencies = [ + "autocfg", +] + [[package]] name = "memoffset" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" +checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87" dependencies = [ - "autocfg 1.0.0", + "autocfg", ] [[package]] name = "memory-db" -version = "0.24.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f36ddb0b2cdc25d38babba472108798e3477f02be5165f038c5e393e50c57a" +checksum = "814bbecfc0451fc314eeea34f05bbcd5b98a7ad7af37faee088b86a1e633f1d4" dependencies = [ "hash-db", - "hashbrown 0.8.1", + "hashbrown", "parity-util-mem", ] @@ -3526,9 +3565,9 @@ checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" [[package]] name = "merlin" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" dependencies = [ "byteorder", "keccak", @@ -3538,18 +3577,18 @@ dependencies = [ [[package]] name = "minicbor" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9a2ef6aa869726518c5d8206fa5d1337bda8a0442807611be617891c018fa781" +checksum = "3265a9f5210bb726f81ef9c456ae0aff5321cd95748c0e71889b0e19d8f0332b" dependencies = [ "minicbor-derive", ] [[package]] name = "minicbor-derive" -version = "0.5.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b3569c0dbfff1b8d5f1434c642b67f5bf81c0f354a3f5f8f180b549dba3c07c" +checksum = "130b9455e28a3f308f6579671816a6f2621e2e0cbf55dc2f886345bef699481e" dependencies = [ "proc-macro2", "quote", @@ -3558,27 +3597,28 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" dependencies = [ "adler", + "autocfg", ] [[package]] name = "mio" -version = "0.6.22" +version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" +checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "fuchsia-zircon", "fuchsia-zircon-sys", "iovec", "kernel32-sys", "libc", "log", - "miow 0.2.1", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", @@ -3604,7 +3644,7 @@ checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" dependencies = [ "log", "mio", - "miow 0.3.5", + "miow 0.3.6", "winapi 0.3.9", ] @@ -3621,9 +3661,9 @@ dependencies = [ [[package]] name = "miow" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" dependencies = [ "kernel32-sys", "net2", @@ -3633,9 +3673,9 @@ dependencies = [ [[package]] name = "miow" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ "socket2", "winapi 0.3.9", @@ -3647,55 +3687,83 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" +[[package]] +name = "multibase" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b78c60039650ff12e140ae867ef5299a58e19dded4d334c849dc7177083667e2" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + [[package]] name = "multihash" -version = "0.11.2" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f75db05d738947aa5389863aadafbcf2e509d7ba099dc2ddcdf4fc66bf7a9e03" +checksum = "4dac63698b887d2d929306ea48b63760431ff8a24fac40ddb22f9c7f49fb7cab" dependencies = [ "blake2b_simd", "blake2s_simd", - "digest 0.8.1", - "sha-1", - "sha2 0.8.2", - "sha3 0.8.2", - "unsigned-varint 0.3.3", + "blake3", + "digest 0.9.0", + "generic-array 0.14.4", + "multihash-derive", + "sha2 0.9.3", + "sha3", + "unsigned-varint 0.5.1", +] + +[[package]] +name = "multihash-derive" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85ee3c48cb9d9b275ad967a0e96715badc13c6029adb92f34fa17b9ff28fd81f" +dependencies = [ + 
"proc-macro-crate", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", + "synstructure", ] [[package]] name = "multimap" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" +checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = "multistream-select" -version = "0.8.4" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36a6aa6e32fbaf16795142335967214b8564a7a4661eb6dc846ef343a6e00ac1" +checksum = "10ddc0eb0117736f19d556355464fc87efc8ad98b29e3fd84f02531eb6e90840" dependencies = [ - "bytes 0.5.6", - "futures 0.3.5", + "bytes 1.0.1", + "futures 0.3.12", "log", - "pin-project 1.0.1", - "smallvec 1.4.1", - "unsigned-varint 0.5.1", + "pin-project 1.0.4", + "smallvec 1.6.1", + "unsigned-varint 0.6.0", ] [[package]] name = "nalgebra" -version = "0.18.1" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaa9fddbc34c8c35dd2108515587b8ce0cab396f17977b8c738568e4edb521a2" +checksum = "d6b6147c3d50b4f3cdabfe2ecc94a0191fd3d6ad58aefd9664cf396285883486" dependencies = [ - "alga", "approx", - "generic-array 0.12.3", + "generic-array 0.13.2", "matrixmultiply", "num-complex", "num-rational", "num-traits", - "rand 0.6.5", + "rand 0.7.3", + "rand_distr", + "simba", "typenum", ] @@ -3708,28 +3776,37 @@ dependencies = [ "rand 0.3.23", ] +[[package]] +name = "nb-connect" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8123a81538e457d44b933a02faf885d3fe8408806b23fa700e8f01c6c3a98998" +dependencies = [ + "libc", + "winapi 0.3.9", +] + [[package]] name = "net2" -version = "0.2.34" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "winapi 0.3.9", ] [[package]] name = "nix" -version = "0.17.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50e4785f2c3b7589a0d0c1dd60285e1188adac4006e8abd6dd578e1567027363" +checksum = "b2ccba0cfe4fdf15982d1674c69b1fd80bad427d293849982668dfe454bd61f2" dependencies = [ "bitflags", "cc", - "cfg-if", + "cfg-if 1.0.0", "libc", - "void", ] [[package]] @@ -3738,7 +3815,7 @@ version = "0.8.0" dependencies = [ "derive_more", "fs_extra", - "futures 0.3.5", + "futures 0.3.12", "hash-db", "hex", "kvdb", @@ -3774,7 +3851,7 @@ dependencies = [ name = "node-browser-testing" version = "2.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", "jsonrpc-core", "libp2p", @@ -3795,7 +3872,7 @@ dependencies = [ "frame-benchmarking-cli", "frame-support", "frame-system", - "futures 0.3.5", + "futures 0.3.12", "hex-literal", "log", "nix", @@ -3814,7 +3891,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "platforms", "rand 0.7.3", "regex", @@ -3827,7 +3904,9 @@ dependencies = [ "sc-consensus", "sc-consensus-babe", "sc-consensus-epochs", + "sc-consensus-slots", "sc-finality-grandpa", + "sc-finality-grandpa-warp-sync", "sc-keystore", "sc-network", "sc-offchain", @@ -3857,7 +3936,6 @@ dependencies = [ "substrate-build-script-utils", "substrate-frame-cli", "tempfile", - "tracing", 
"wasm-bindgen", "wasm-bindgen-futures", ] @@ -3961,8 +4039,8 @@ dependencies = [ name = "node-rpc-client" version = "2.0.0" dependencies = [ - "futures 0.1.29", - "hyper 0.12.35", + "futures 0.1.30", + "hyper 0.13.9", "jsonrpc-core-client", "log", "node-primitives", @@ -3972,7 +4050,7 @@ dependencies = [ [[package]] name = "node-runtime" -version = "2.0.0" +version = "2.0.1" dependencies = [ "frame-benchmarking", "frame-executive", @@ -3981,12 +4059,13 @@ dependencies = [ "frame-system-benchmarking", "frame-system-rpc-runtime-api", "hex-literal", - "integer-sqrt", "node-primitives", + "pallet-assets", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-balances", + "pallet-bounties", "pallet-collective", "pallet-contracts", "pallet-contracts-primitives", @@ -3997,7 +4076,9 @@ dependencies = [ "pallet-identity", "pallet-im-online", "pallet-indices", + "pallet-lottery", "pallet-membership", + "pallet-mmr", "pallet-multisig", "pallet-offences", "pallet-offences-benchmarking", @@ -4012,6 +4093,7 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-sudo", "pallet-timestamp", + "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-treasury", @@ -4035,7 +4117,7 @@ dependencies = [ "sp-transaction-pool", "sp-version", "static_assertions", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] @@ -4054,9 +4136,11 @@ dependencies = [ "sc-consensus-aura", "sc-executor", "sc-finality-grandpa", + "sc-keystore", "sc-rpc", "sc-rpc-api", "sc-service", + "sc-telemetry", "sc-transaction-pool", "sp-api", "sp-block-builder", @@ -4106,7 +4190,7 @@ dependencies = [ "sp-std", "sp-transaction-pool", "sp-version", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] @@ -4117,7 +4201,7 @@ dependencies = [ "frame-support", "frame-system", "fs_extra", - "futures 0.3.5", + "futures 0.3.12", "log", "node-executor", "node-primitives", @@ -4181,7 +4265,7 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ - "autocfg 1.0.0", + "autocfg", "num-integer", "num-traits", ] @@ -4192,17 +4276,17 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" dependencies = [ - "autocfg 1.0.0", + "autocfg", "num-traits", ] [[package]] name = "num-integer" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ - "autocfg 1.0.0", + "autocfg", "num-traits", ] @@ -4212,7 +4296,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ - "autocfg 1.0.0", + "autocfg", "num-bigint", "num-integer", "num-traits", @@ -4220,11 +4304,11 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ - "autocfg 1.0.0", + "autocfg", "libm", ] @@ -4240,44 +4324,34 @@ dependencies = [ [[package]] 
name = "object" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cbca9424c482ee628fa549d9c812e2cd22f1180b9222c9200fdfa6eb31aecb2" - -[[package]] -name = "object" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" +checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" dependencies = [ "crc32fast", "indexmap", - "wasmparser 0.57.0", ] [[package]] -name = "once_cell" -version = "0.1.8" +name = "object" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532c29a261168a45ce28948f9537ddd7a5dd272cc513b3017b1e82a88f962c37" -dependencies = [ - "parking_lot 0.7.1", -] +checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" [[package]] name = "once_cell" -version = "1.4.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" +checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" dependencies = [ - "parking_lot 0.11.0", + "parking_lot 0.11.1", ] [[package]] name = "oorandom" -version = "11.1.2" +version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a170cebd8021a008ea92e4db85a72f80b35df514ec664b296fdcbb654eac0b2c" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "opaque-debug" @@ -4317,10 +4391,12 @@ dependencies = [ [[package]] name = "pallet-assets" -version = "2.0.0" +version = "3.0.0" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", + "pallet-balances", "parity-scale-codec", "serde", "sp-core", @@ -4331,7 +4407,7 @@ dependencies = [ [[package]] name = "pallet-atomic-swap" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", @@ -4346,7 +4422,7 @@ dependencies = [ [[package]] name = "pallet-aura" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", @@ -4354,12 +4430,11 @@ dependencies = [ "pallet-session", "pallet-timestamp", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "serde", "sp-application-crypto", "sp-consensus-aura", "sp-core", - "sp-inherents", "sp-io", "sp-runtime", "sp-std", @@ -4368,7 +4443,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", @@ -4386,12 +4461,13 @@ dependencies = [ [[package]] name = "pallet-authorship" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", "parity-scale-codec", + "serde", "sp-authorship", "sp-core", "sp-inherents", @@ -4402,7 +4478,7 @@ dependencies = [ [[package]] name = "pallet-babe" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4431,7 +4507,7 @@ dependencies = [ [[package]] name = "pallet-balances" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4445,9 +4521,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-bounties" +version = "3.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-treasury", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-storage", +] + 
[[package]] name = "pallet-collective" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4464,7 +4558,7 @@ dependencies = [ [[package]] name = "pallet-contracts" -version = "2.0.0" +version = "2.0.1" dependencies = [ "assert_matches", "frame-benchmarking", @@ -4473,13 +4567,16 @@ dependencies = [ "hex-literal", "pallet-balances", "pallet-contracts-primitives", + "pallet-contracts-proc-macro", "pallet-randomness-collective-flip", "pallet-timestamp", "parity-scale-codec", "parity-wasm 0.41.0", - "paste 1.0.0", + "paste 1.0.4", "pretty_assertions", - "pwasm-utils", + "pwasm-utils 0.16.0", + "rand 0.7.3", + "rand_pcg", "serde", "sp-core", "sp-io", @@ -4492,7 +4589,7 @@ dependencies = [ [[package]] name = "pallet-contracts-primitives" -version = "2.0.0" +version = "2.0.1" dependencies = [ "bitflags", "parity-scale-codec", @@ -4500,9 +4597,18 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-contracts-proc-macro" +version = "0.1.0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pallet-contracts-rpc" -version = "0.8.0" +version = "0.8.1" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4521,7 +4627,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0" +version = "0.8.1" dependencies = [ "pallet-contracts-primitives", "parity-scale-codec", @@ -4532,7 +4638,7 @@ dependencies = [ [[package]] name = "pallet-democracy" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4552,7 +4658,7 @@ dependencies = [ [[package]] name = "pallet-elections" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", @@ -4568,7 +4674,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4585,28 +4691,6 @@ dependencies = [ "substrate-test-utils", ] -[[package]] -name = "pallet-evm" -version = "2.0.0" -dependencies = [ - "evm", - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "pallet-balances", - "pallet-timestamp", - "parity-scale-codec", - "primitive-types", - "ripemd160", - "rlp", - "serde", - "sha3 0.8.2", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - [[package]] name = "pallet-example" version = "2.0.0" @@ -4625,7 +4709,7 @@ dependencies = [ [[package]] name = "pallet-example-offchain-worker" -version = "2.0.0" +version = "2.0.1" dependencies = [ "frame-support", "frame-system", @@ -4641,11 +4725,12 @@ dependencies = [ [[package]] name = "pallet-example-parallel" -version = "2.0.0" +version = "2.0.1" dependencies = [ "frame-support", "frame-system", "parity-scale-codec", + "serde", "sp-core", "sp-io", "sp-runtime", @@ -4655,7 +4740,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" -version = "2.0.0" +version = "3.0.0" dependencies = [ "finality-grandpa", "frame-benchmarking", @@ -4683,7 +4768,7 @@ dependencies = [ [[package]] name = "pallet-identity" -version = "2.0.0" +version = "3.0.0" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4700,7 +4785,7 @@ dependencies = [ [[package]] name = "pallet-im-online" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4719,7 +4804,7 @@ dependencies = [ [[package]] name = "pallet-indices" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4734,12 +4819,47 @@ dependencies = [ "sp-std", ] +[[package]] +name = 
"pallet-lottery" +version = "3.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-membership" -version = "2.0.0" +version = "3.0.0" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-mmr" +version = "3.0.0" dependencies = [ + "ckb-merkle-mountain-range", + "env_logger 0.8.2", + "frame-benchmarking", "frame-support", "frame-system", + "hex-literal", + "pallet-mmr-primitives", "parity-scale-codec", "serde", "sp-core", @@ -4748,9 +4868,24 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-mmr-primitives" +version = "3.0.0" +dependencies = [ + "frame-support", + "frame-system", + "hex-literal", + "parity-scale-codec", + "serde", + "sp-api", + "sp-core", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-multisig" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4766,7 +4901,7 @@ dependencies = [ [[package]] name = "pallet-nicks" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", @@ -4795,7 +4930,7 @@ dependencies = [ [[package]] name = "pallet-offences" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", @@ -4811,7 +4946,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4836,7 +4971,7 @@ dependencies = [ [[package]] name = "pallet-proxy" -version = "2.0.1" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4853,12 +4988,13 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", "parity-scale-codec", "safe-mix", + "serde", "sp-core", "sp-io", "sp-runtime", @@ -4867,7 +5003,7 @@ dependencies = [ [[package]] name = "pallet-recovery" -version = "2.0.0" +version = "3.0.0" dependencies = [ "enumflags2", "frame-support", @@ -4883,7 +5019,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4899,7 +5035,7 @@ dependencies = [ [[package]] name = "pallet-scored-pool" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", @@ -4914,7 +5050,7 @@ dependencies = [ [[package]] name = "pallet-session" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", @@ -4935,7 +5071,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4957,7 +5093,7 @@ dependencies = [ [[package]] name = "pallet-society" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", @@ -4973,7 +5109,7 @@ dependencies = [ [[package]] name = "pallet-staking" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4985,7 +5121,7 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "rand_chacha 0.2.2", "serde", "sp-application-crypto", @@ -5015,6 +5151,7 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-timestamp", 
"parity-scale-codec", + "serde", "sp-core", "sp-io", "sp-npos-elections", @@ -5024,7 +5161,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" -version = "2.0.0" +version = "3.0.0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5035,7 +5172,7 @@ dependencies = [ [[package]] name = "pallet-sudo" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", @@ -5054,6 +5191,7 @@ dependencies = [ "frame-support", "frame-system", "parity-scale-codec", + "serde", "sp-core", "sp-io", "sp-runtime", @@ -5061,7 +5199,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5077,17 +5215,35 @@ dependencies = [ "sp-timestamp", ] +[[package]] +name = "pallet-tips" +version = "3.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-treasury", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-storage", +] + [[package]] name = "pallet-transaction-payment" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", "pallet-balances", - "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "serde", - "smallvec 1.4.1", + "serde_json", + "smallvec 1.6.1", "sp-core", "sp-io", "sp-runtime", @@ -5097,14 +5253,13 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" -version = "2.0.0" +version = "3.0.0" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", - "serde", "sp-api", "sp-blockchain", "sp-core", @@ -5114,24 +5269,22 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0" +version = "3.0.0" dependencies = [ - "frame-support", + "pallet-transaction-payment", "parity-scale-codec", - "serde", - "serde_json", "sp-api", "sp-runtime", - "sp-std", ] [[package]] name = "pallet-treasury" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "impl-trait-for-tuples", "pallet-balances", "parity-scale-codec", "serde", @@ -5144,7 +5297,7 @@ dependencies = [ [[package]] name = "pallet-utility" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5160,7 +5313,7 @@ dependencies = [ [[package]] name = "pallet-vesting" -version = "2.0.0" +version = "3.0.0" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5179,23 +5332,25 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.1.2" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00d595e372d119261593297debbe4193811a4dc811d2a1ccbb8caaa6666ad7ab" +checksum = "111e193c96758d476d272093a853882668da17489f76bf4361b8decae0b6c515" dependencies = [ "blake2-rfc", "crc32fast", + "hex", "libc", "log", - "memmap", - "parking_lot 0.10.2", + "memmap2", + "parking_lot 0.11.1", + "rand 0.8.3", ] [[package]] name = "parity-multiaddr" -version = "0.9.3" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7ad66970bbab360c97179b60906e2dc4aef1f7fca8ab4e5c5db8c97b16814a" +checksum = "8bfda2e46fc5e14122649e2645645a81ee5844e0fb2e727ef560cc71a8b2d801" dependencies = [ "arrayref", "bs58", @@ -5205,17 +5360,17 @@ dependencies = [ "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint 0.5.1", - "url 2.1.1", + "unsigned-varint 
0.6.0", + "url 2.2.0", ] [[package]] name = "parity-scale-codec" -version = "1.3.4" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d38aeaffc032ec69faa476b3caaca8d4dd7f3f798137ff30359e5c7869ceb6" +checksum = "75c823fdae1bb5ff5708ee61a62697e6296175dc671710876871c853f48592b3" dependencies = [ - "arrayvec 0.5.1", + "arrayvec 0.5.2", "bitvec", "byte-slice-cast", "parity-scale-codec-derive", @@ -5224,9 +5379,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "1.2.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd20ff7e0399b274a5f5bb37b712fccb5b3a64b9128200d1c3cc40fe709cb073" +checksum = "9029e65297c7fd6d7013f0579e193ec2b34ae78eabca854c9417504ad8a2d214" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5247,11 +5402,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e57fea504fea33f9fbb5f49f378359030e7e026a6ab849bb9e8f0787376f1bf" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "libc", "log", "mio-named-pipes", - "miow 0.3.5", + "miow 0.3.6", "rand 0.7.3", "tokio 0.1.22", "tokio-named-pipes", @@ -5261,19 +5416,17 @@ dependencies = [ [[package]] name = "parity-util-mem" -version = "0.7.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297ff91fa36aec49ce183484b102f6b75b46776822bd81525bfc4cc9b0dd0f5c" +checksum = "664a8c6b8e62d8f9f2f937e391982eb433ab285b4cd9545b342441e04a906e42" dependencies = [ - "cfg-if", - "ethereum-types", - "hashbrown 0.8.1", + "cfg-if 1.0.0", + "hashbrown", "impl-trait-for-tuples", - "lru 0.5.3", "parity-util-mem-derive", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "primitive-types", - "smallvec 1.4.1", + "smallvec 1.6.1", "winapi 0.3.9", ] @@ -5316,9 +5469,9 @@ dependencies = [ "mio", "mio-extras", "rand 0.7.3", - "sha-1", + "sha-1 0.8.2", "slab", - "url 2.1.1", + "url 2.2.0", ] [[package]] @@ -5327,16 +5480,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" -[[package]] -name = "parking_lot" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" -dependencies = [ - "lock_api 0.1.5", - "parking_lot_core 0.4.0", -] - [[package]] name = "parking_lot" version = "0.9.0" @@ -5360,26 +5503,13 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" dependencies = [ "instant", - "lock_api 0.4.1", - "parking_lot_core 0.8.0", -] - -[[package]] -name = "parking_lot_core" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" -dependencies = [ - "libc", - "rand 0.6.5", - "rustc_version", - "smallvec 0.6.13", - "winapi 0.3.9", + "lock_api 0.4.2", + "parking_lot_core 0.8.2", ] [[package]] @@ -5388,12 +5518,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" dependencies = [ - "cfg-if", - "cloudabi 0.0.3", + "cfg-if 0.1.10", + "cloudabi", "libc", - 
"redox_syscall", + "redox_syscall 0.1.57", "rustc_version", - "smallvec 0.6.13", + "smallvec 0.6.14", "winapi 0.3.9", ] @@ -5403,26 +5533,25 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" dependencies = [ - "cfg-if", - "cloudabi 0.0.3", + "cfg-if 0.1.10", + "cloudabi", "libc", - "redox_syscall", - "smallvec 1.4.1", + "redox_syscall 0.1.57", + "smallvec 1.6.1", "winapi 0.3.9", ] [[package]] name = "parking_lot_core" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" +checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ - "cfg-if", - "cloudabi 0.1.0", + "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", - "smallvec 1.4.1", + "redox_syscall 0.1.57", + "smallvec 1.6.1", "winapi 0.3.9", ] @@ -5438,9 +5567,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ddc8e145de01d9180ac7b78b9676f95a9c2447f6a88b2c2a04702211bc5d71" +checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" [[package]] name = "paste-impl" @@ -5459,7 +5588,15 @@ checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" dependencies = [ "byteorder", "crypto-mac 0.7.0", - "rayon", +] + +[[package]] +name = "pbkdf2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +dependencies = [ + "crypto-mac 0.8.0", ] [[package]] @@ -5526,7 +5663,7 @@ checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" dependencies = [ "maplit", "pest", - "sha-1", + "sha-1 0.8.2", ] [[package]] @@ -5541,27 +5678,27 @@ dependencies = [ [[package]] name = "pin-project" -version = "0.4.22" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" +checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" dependencies = [ - "pin-project-internal 0.4.22", + "pin-project-internal 0.4.27", ] [[package]] name = "pin-project" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee41d838744f60d959d7074e3afb6b35c7456d0f61cad38a24e35e6553f73841" +checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" dependencies = [ - "pin-project-internal 1.0.1", + "pin-project-internal 1.0.4", ] [[package]] name = "pin-project-internal" -version = "0.4.22" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" +checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" dependencies = [ "proc-macro2", "quote", @@ -5570,9 +5707,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a4ffa594b66bff340084d4081df649a7dc049ac8d7fc458d8e628bfbbb2f86" +checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" dependencies = [ "proc-macro2", "quote", @@ -5581,9 +5718,15 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = 
"0.1.7" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" +checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" + +[[package]] +name = "pin-project-lite" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" [[package]] name = "pin-utils" @@ -5593,71 +5736,89 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" +checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "platforms" -version = "0.2.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb3b2b1033b8a60b4da6ee470325f887758c95d5320f52f9ce0df055a55940e" +checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325" [[package]] name = "plotters" -version = "0.2.15" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d1685fbe7beba33de0330629da9d955ac75bd54f33d7b79f9a895590124f6bb" +checksum = "45ca0ae5f169d0917a7c7f5a9c1a3d3d9598f18f529dd2b8373ed988efea307a" dependencies = [ - "js-sys", "num-traits", + "plotters-backend", + "plotters-svg", "wasm-bindgen", "web-sys", ] +[[package]] +name = "plotters-backend" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07fffcddc1cb3a1de753caa4e4df03b79922ba43cf882acc1bdd7e8df9f4590" + +[[package]] +name = "plotters-svg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b38a02e23bd9604b842a812063aec4ef702b57989c37b655254bb61c471ad211" +dependencies = [ + "plotters-backend", +] + [[package]] name = "polling" -version = "1.1.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0720e0b9ea9d52451cf29d3413ba8a9303f8815d9d9653ef70e03ff73e65566" +checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "log", - "wepoll-sys-stjepang", + "wepoll-sys", "winapi 0.3.9", ] [[package]] name = "poly1305" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b42192ab143ed7619bf888a7f9c6733a9a2153b218e2cd557cfdb52fbf9bb1" +checksum = "4b7456bc1ad2d4cf82b3a016be4c2ac48daf11bf990c1603ebd447fe6f30fca8" dependencies = [ + "cpuid-bool 0.2.0", "universal-hash", ] [[package]] name = "polyval" -version = "0.4.0" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a50142b55ab3ed0e9f68dfb3709f1d90d29da24e91033f28b96330643107dc" +checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" dependencies = [ - "cfg-if", + "cpuid-bool 0.2.0", + "opaque-debug 0.3.0", "universal-hash", ] [[package]] name = "ppv-lite86" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "predicates" -version = "1.0.5" +version = "1.0.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "96bfead12e90dccead362d62bb2c90a5f6fc4584963645bc7f71a735e0b0735a" +checksum = "eeb433456c1a57cc93554dea3ce40b4c19c4057e41c55d4a0f3d84ea71c325aa" dependencies = [ "difference", "predicates-core", @@ -5665,15 +5826,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06075c3a3e92559ff8929e7a280684489ea27fe44805174c3ebd9328dcb37178" +checksum = "57e35a3326b75e49aa85f5dc6ec15b41108cf5aee58eabb1f274dd18b73c2451" [[package]] name = "predicates-tree" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e63c4859013b38a76eca2414c64911fba30def9e3202ac461a2d22831220124" +checksum = "15f553275e5721409451eb85e15fd9a860a6e5ab4496eb215987502b5f5391f2" dependencies = [ "predicates-core", "treeline", @@ -5693,13 +5854,12 @@ dependencies = [ [[package]] name = "primitive-types" -version = "0.7.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55c21c64d0eaa4d7ed885d959ef2d62d9e488c27c0e02d9aa5ce6c877b7d5f8" +checksum = "2415937401cb030a2a0a4d922483f945fa068f52a7dbb22ce0fe5f2b6f6adace" dependencies = [ "fixed-hash", "impl-codec", - "impl-rlp", "impl-serde", "uint", ] @@ -5715,9 +5875,9 @@ dependencies = [ [[package]] name = "proc-macro-error" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc175e9777c3116627248584e8f8b3e2987405cabe1c0adf7d1dd28f09dc7880" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2", @@ -5728,28 +5888,26 @@ dependencies = [ [[package]] name = "proc-macro-error-attr" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cc9795ca17eb581285ec44936da7fc2335a3f34f2ddd13118b6f4d515435c50" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2", "quote", - "syn", - "syn-mid", "version_check", ] [[package]] name = "proc-macro-hack" -version = "0.5.16" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" @@ -5762,54 +5920,54 @@ dependencies = [ [[package]] name = "prometheus" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d70cf4412832bcac9cffe27906f4a66e450d323525e977168c70d1b36120ae" +checksum = "c8425533e7122f0c3cc7a37e6244b16ad3a2cc32ae7ac6276e2a75da0d9c200d" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "fnv", "lazy_static", - "parking_lot 0.11.0", + "parking_lot 0.11.1", "regex", "thiserror", ] [[package]] name = "prost" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212" +checksum = 
"9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "prost-derive", ] [[package]] name = "prost-build" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26" +checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "heck", - "itertools 0.8.2", + "itertools 0.9.0", "log", "multimap", "petgraph", "prost", "prost-types", "tempfile", - "which", + "which 4.0.2", ] [[package]] name = "prost-derive" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" +checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" dependencies = [ "anyhow", - "itertools 0.8.2", + "itertools 0.9.0", "proc-macro2", "quote", "syn", @@ -5817,14 +5975,23 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa" +checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "prost", ] +[[package]] +name = "psm" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3abf49e5417290756acfd26501536358560c4a5cc4a0934d390939acb3e7083a" +dependencies = [ + "cc", +] + [[package]] name = "pwasm-utils" version = "0.14.0" @@ -5836,6 +6003,17 @@ dependencies = [ "parity-wasm 0.41.0", ] +[[package]] +name = "pwasm-utils" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c8ac87af529432d3a4f0e2b3bbf08af49f28f09cc73ed7e551161bdaef5f78d" +dependencies = [ + "byteorder", + "log", + "parity-wasm 0.41.0", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -5850,14 +6028,13 @@ checksum = "3ac73b1112776fc109b2e61909bc46c7e1bf0d7f690ffb1676553acce16d5cda" [[package]] name = "quickcheck" -version = "0.9.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" +checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ - "env_logger", + "env_logger 0.8.2", "log", - "rand 0.7.3", - "rand_core 0.5.1", + "rand 0.8.3", ] [[package]] @@ -5868,23 +6045,23 @@ checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" dependencies = [ "futures-core", "futures-sink", - "pin-project-lite", + "pin-project-lite 0.1.11", ] [[package]] name = "quote" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" dependencies = [ "proc-macro2", ] [[package]] name = "radium" -version = "0.3.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" [[package]] name = "rand" @@ -5909,60 +6086,30 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "rand" -version = "0.5.6" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" -dependencies = [ - "cloudabi 0.0.3", - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "winapi 0.3.9", -] - -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.7", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg 0.1.2", - "rand_xorshift", - "winapi 0.3.9", -] - [[package]] name = "rand" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.14", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", "rand_hc 0.2.0", - "rand_pcg 0.2.1", + "rand_pcg", ] [[package]] -name = "rand_chacha" -version = "0.1.1" +name = "rand" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ - "autocfg 0.1.7", - "rand_core 0.3.1", + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.1", + "rand_hc 0.3.0", ] [[package]] @@ -5975,6 +6122,16 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.1", +] + [[package]] name = "rand_core" version = "0.3.1" @@ -5996,70 +6153,43 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.14", -] - -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "getrandom 0.1.16", ] [[package]] -name = "rand_isaac" -version = "0.1.1" +name = "rand_core" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" +checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "rand_core 0.3.1", + "getrandom 0.2.2", ] [[package]] -name = "rand_jitter" -version = "0.1.4" +name = "rand_distr" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" +checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi 0.3.9", + "rand 0.7.3", ] [[package]] -name = "rand_os" -version = "0.1.3" +name = "rand_hc" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "cloudabi 0.0.3", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "wasm-bindgen", - "winapi 0.3.9", + "rand_core 0.5.1", ] [[package]] -name = "rand_pcg" -version = "0.1.2" +name = "rand_hc" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "autocfg 0.1.7", - "rand_core 0.4.2", + "rand_core 0.6.1", ] [[package]] @@ -6071,20 +6201,11 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "raw-cpuid" -version = "7.0.3" +version = "8.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a349ca83373cfa5d6dbb66fd76e58b2cca08da71a5f6400de0a0a6a9bceeaf" +checksum = "1fdf7d9dbd43f3d81d94a49c1c3df73cc2b3827995147e6cf7f89d4ec5483e73" dependencies = [ "bitflags", "cc", @@ -6099,25 +6220,25 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.3.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f02856753d04e03e26929f820d0a0a337ebe71f849801eea335d464b349080" +checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" dependencies = [ - "autocfg 1.0.0", - "crossbeam-deque", + "autocfg", + "crossbeam-deque 0.8.0", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.7.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e92e15d89083484e11353891f1af602cc661426deb9564c298b270c726973280" +checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" dependencies = [ - "crossbeam-deque", - "crossbeam-queue", - "crossbeam-utils", + "crossbeam-channel", + "crossbeam-deque 0.8.0", + "crossbeam-utils 0.8.1", "lazy_static", "num_cpus", ] @@ -6137,31 +6258,50 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_syscall" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.14", - "redox_syscall", + "getrandom 0.1.16", + "redox_syscall 0.1.57", "rust-argon2", ] +[[package]] +name = "redox_users" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +dependencies = [ + "getrandom 0.2.2", + "redox_syscall 0.2.4", +] + [[package]] name = "ref-cast" -version = "1.0.2" +version = "1.0.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "745c1787167ddae5569661d5ffb8b25ae5fedbf46717eaa92d652221cec72623" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.2" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" dependencies = [ "proc-macro2", "quote", @@ -6170,20 +6310,20 @@ dependencies = [ [[package]] name = "regalloc" -version = "0.0.27" +version = "0.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ba8aaf5fe7cf307c6dbdaeed85478961d29e25e3bee5169e11b92fa9f027a8" +checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" dependencies = [ "log", "rustc-hash", - "smallvec 1.4.1", + "smallvec 1.6.1", ] [[package]] name = "regex" -version = "1.3.9" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" dependencies = [ "aho-corasick", "memchr", @@ -6203,9 +6343,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.18" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" [[package]] name = "region" @@ -6230,45 +6370,25 @@ dependencies = [ [[package]] name = "retain_mut" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e005d658ad26eacc2b6c506dfde519f4e277e328d0eb3379ca61647d70a8f531" +checksum = "53552c6c49e1e13f1a203ef0080ab3bbef0beb570a528993e83df057a9d9bba1" [[package]] name = "ring" -version = "0.16.15" +version = "0.16.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" +checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" dependencies = [ "cc", "libc", - "once_cell 1.4.1", + "once_cell", "spin", "untrusted", "web-sys", "winapi 0.3.9", ] -[[package]] -name = "ripemd160" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "rlp" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a7d3f9bed94764eac15b8f14af59fac420c236adaff743b7bcc88e265cb4345" -dependencies = [ - "rustc-hex", -] - [[package]] name = "rocksdb" version = "0.15.0" @@ -6281,9 +6401,9 @@ dependencies = [ [[package]] name = "rpassword" -version = "4.0.5" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99371657d3c8e4d816fb6221db98fa408242b0b53bac08f8676a41f8554fe99f" +checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb" dependencies = [ "libc", "winapi 0.3.9", @@ -6291,21 +6411,21 @@ dependencies = [ [[package]] name = "rust-argon2" -version = "0.7.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" +checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.11.0", + "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils", + "crossbeam-utils 0.8.1", ] [[package]] name = "rustc-demangle" -version = "0.1.16" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" [[package]] name = "rustc-hash" @@ -6330,9 +6450,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac94b333ee2aac3284c5b8a1b7fb4dd11cba88c244e3fe33cdbd047af0eb693" +checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" dependencies = [ "base64 0.12.3", "log", @@ -6341,6 +6461,19 @@ dependencies = [ "webpki", ] +[[package]] +name = "rustls" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" +dependencies = [ + "base64 0.13.0", + "log", + "ring", + "sct", + "webpki", +] + [[package]] name = "rustls-native-certs" version = "0.4.0" @@ -6348,21 +6481,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "629d439a7672da82dd955498445e496ee2096fe2117b9f796558a43fdb9e59b8" dependencies = [ "openssl-probe", - "rustls", + "rustls 0.18.1", "schannel", "security-framework", ] [[package]] name = "rustversion" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9bdc5e856e51e685846fb6c13a1f5e5432946c2c90501bdc76a1319f19e29da" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" [[package]] name = "rw-stream-sink" @@ -6370,8 +6498,8 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "futures 0.3.5", - "pin-project 0.4.22", + "futures 0.3.12", + "pin-project 0.4.27", "static_assertions", ] @@ -6392,11 +6520,11 @@ dependencies = [ [[package]] name = "salsa20" -version = "0.6.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f47b10fa80f6969bbbd9c8e7cc998f082979d402a9e10579e2303a87955395" +checksum = "399f290ffc409596022fce5ea5d4138184be4784f2b28c62c59f0d8389059a15" dependencies = [ - "stream-cipher 0.7.1", + "cipher", ] [[package]] @@ -6410,13 +6538,12 @@ dependencies = [ [[package]] name = "sc-authority-discovery" -version = "0.8.0" +version = "0.9.0" dependencies = [ "async-trait", - "bytes 0.5.6", "derive_more", "either", - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", "libp2p", "log", @@ -6426,7 +6553,6 @@ dependencies = [ "quickcheck", "rand 0.7.3", "sc-client-api", - "sc-keystore", "sc-network", "sc-peerset", "serde_json", @@ -6443,13 +6569,13 @@ dependencies = [ [[package]] name = "sc-basic-authorship" -version = "0.8.0" +version = "0.9.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", "log", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -6464,12 +6590,11 @@ dependencies = [ 
"sp-transaction-pool", "substrate-prometheus-endpoint", "substrate-test-runtime-client", - "tokio-executor 0.2.0-alpha.6", ] [[package]] name = "sc-block-builder" -version = "0.8.0" +version = "0.9.0" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -6487,7 +6612,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" -version = "2.0.0" +version = "3.0.0" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -6507,7 +6632,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" -version = "2.0.0" +version = "3.0.0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6517,14 +6642,11 @@ dependencies = [ [[package]] name = "sc-cli" -version = "0.8.0" +version = "0.9.0" dependencies = [ - "ansi_term 0.12.1", - "atty", - "bip39", "chrono", "fdlimit", - "futures 0.3.5", + "futures 0.3.12", "hex", "libp2p", "log", @@ -6533,7 +6655,6 @@ dependencies = [ "rand 0.7.3", "regex", "rpassword", - "sc-cli-proc-macro", "sc-client-api", "sc-keystore", "sc-network", @@ -6553,39 +6674,25 @@ dependencies = [ "structopt", "tempfile", "thiserror", - "tokio 0.2.22", - "tracing", - "tracing-log", - "tracing-subscriber", -] - -[[package]] -name = "sc-cli-proc-macro" -version = "2.0.0" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", + "tiny-bip39", + "tokio 0.2.25", ] [[package]] name = "sc-client-api" -version = "2.0.0" +version = "3.0.0" dependencies = [ "derive_more", "fnv", - "futures 0.3.5", + "futures 0.3.12", "hash-db", - "hex-literal", "kvdb", "kvdb-memorydb", "lazy_static", "log", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-executor", - "sc-telemetry", "sp-api", "sp-blockchain", "sp-consensus", @@ -6593,7 +6700,6 @@ dependencies = [ "sp-database", "sp-externalities", "sp-inherents", - "sp-keyring", "sp-keystore", "sp-runtime", "sp-state-machine", @@ -6606,11 +6712,12 @@ dependencies = [ "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime", + "thiserror", ] [[package]] name = "sc-client-db" -version = "0.8.0" +version = "0.9.0" dependencies = [ "blake2-rfc", "hash-db", @@ -6622,7 +6729,7 @@ dependencies = [ "parity-db", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "quickcheck", "sc-client-api", "sc-executor", @@ -6644,7 +6751,7 @@ dependencies = [ [[package]] name = "sc-consensus" -version = "0.8.0" +version = "0.9.0" dependencies = [ "sc-client-api", "sp-blockchain", @@ -6654,14 +6761,15 @@ dependencies = [ [[package]] name = "sc-consensus-aura" -version = "0.8.0" +version = "0.9.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", + "getrandom 0.2.2", "log", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", "sc-consensus-slots", @@ -6677,6 +6785,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-consensus-aura", + "sp-consensus-slots", "sp-core", "sp-inherents", "sp-io", @@ -6693,11 +6802,11 @@ dependencies = [ [[package]] name = "sc-consensus-babe" -version = "0.8.0" +version = "0.9.0" dependencies = [ "derive_more", "fork-tree", - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", "log", "merlin", @@ -6705,7 +6814,7 @@ dependencies = [ "num-rational", "num-traits", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "pdqselect", "rand 0.7.3", "rand_chacha 0.2.2", @@ -6729,6 +6838,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-consensus-babe", + "sp-consensus-slots", "sp-consensus-vrf", "sp-core", 
"sp-inherents", @@ -6747,10 +6857,10 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" -version = "0.8.0" +version = "0.9.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.12", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -6776,11 +6886,11 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" -version = "0.8.0" +version = "0.9.0" dependencies = [ "fork-tree", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-client-api", "sp-blockchain", "sp-runtime", @@ -6788,16 +6898,17 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" -version = "0.8.0" +version = "0.9.0" dependencies = [ "assert_matches", "derive_more", - "futures 0.3.5", + "futures 0.3.12", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "log", - "parking_lot 0.10.2", + "parity-scale-codec", + "parking_lot 0.11.1", "sc-basic-authorship", "sc-client-api", "sc-consensus-babe", @@ -6808,8 +6919,10 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-consensus-babe", + "sp-consensus-slots", "sp-core", "sp-inherents", + "sp-keyring", "sp-keystore", "sp-runtime", "sp-timestamp", @@ -6818,19 +6931,19 @@ dependencies = [ "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", "tempfile", - "tokio 0.2.22", + "tokio 0.2.25", ] [[package]] name = "sc-consensus-pow" -version = "0.8.0" +version = "0.9.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", "log", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-client-api", "sp-api", "sp-block-builder", @@ -6846,17 +6959,18 @@ dependencies = [ [[package]] name = "sc-consensus-slots" -version = "0.8.0" +version = "0.9.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", "log", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-client-api", "sc-telemetry", "sp-api", "sp-application-crypto", + "sp-arithmetic", "sp-blockchain", "sp-consensus", "sp-consensus-slots", @@ -6866,11 +6980,12 @@ dependencies = [ "sp-state-machine", "sp-trie", "substrate-test-runtime-client", + "thiserror", ] [[package]] name = "sc-consensus-uncles" -version = "0.8.0" +version = "0.9.0" dependencies = [ "log", "sc-client-api", @@ -6883,7 +6998,7 @@ dependencies = [ [[package]] name = "sc-executor" -version = "0.8.0" +version = "0.9.0" dependencies = [ "assert_matches", "derive_more", @@ -6893,7 +7008,8 @@ dependencies = [ "log", "parity-scale-codec", "parity-wasm 0.41.0", - "parking_lot 0.10.2", + "parking_lot 0.11.1", + "paste 1.0.4", "sc-executor-common", "sc-executor-wasmi", "sc-executor-wasmtime", @@ -6914,7 +7030,6 @@ dependencies = [ "sp-version", "sp-wasm-interface", "substrate-test-runtime", - "test-case", "tracing", "tracing-subscriber", "wasmi", @@ -6923,23 +7038,22 @@ dependencies = [ [[package]] name = "sc-executor-common" -version = "0.8.0" +version = "0.9.0" dependencies = [ "derive_more", - "log", "parity-scale-codec", "parity-wasm 0.41.0", "sp-allocator", "sp-core", - "sp-runtime-interface", "sp-serializer", "sp-wasm-interface", + "thiserror", "wasmi", ] [[package]] name = "sc-executor-wasmi" -version = "0.8.0" +version = "0.9.0" dependencies = [ "log", "parity-scale-codec", @@ -6953,13 +7067,13 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" -version = "0.8.0" +version = "0.9.0" dependencies = [ "assert_matches", "log", "parity-scale-codec", "parity-wasm 0.41.0", - "pwasm-utils", + "pwasm-utils 0.14.0", "sc-executor-common", "scoped-tls", "sp-allocator", 
@@ -6971,18 +7085,19 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" -version = "0.8.0" +version = "0.9.0" dependencies = [ "assert_matches", "derive_more", "finality-grandpa", "fork-tree", - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", + "linked-hash-map", "log", "parity-scale-codec", - "parking_lot 0.10.2", - "pin-project 0.4.22", + "parking_lot 0.11.1", + "pin-project 1.0.4", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -7011,16 +7126,16 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.22", + "tokio 0.2.25", ] [[package]] name = "sc-finality-grandpa-rpc" -version = "0.8.0" +version = "0.9.0" dependencies = [ "derive_more", "finality-grandpa", - "futures 0.3.5", + "futures 0.3.12", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -7045,11 +7160,30 @@ dependencies = [ ] [[package]] -name = "sc-informant" +name = "sc-finality-grandpa-warp-sync" version = "0.8.0" +dependencies = [ + "derive_more", + "futures 0.3.12", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot 0.11.1", + "prost", + "sc-client-api", + "sc-finality-grandpa", + "sc-network", + "sc-service", + "sp-blockchain", + "sp-runtime", +] + +[[package]] +name = "sc-informant" +version = "0.9.0" dependencies = [ "ansi_term 0.12.1", - "futures 0.3.5", + "futures 0.3.12", "log", "parity-util-mem", "sc-client-api", @@ -7063,32 +7197,32 @@ dependencies = [ [[package]] name = "sc-keystore" -version = "2.0.0" +version = "3.0.0" dependencies = [ "async-trait", "derive_more", - "futures 0.3.5", + "futures 0.3.12", "futures-util", "hex", "merlin", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "rand 0.7.3", "serde_json", "sp-application-crypto", "sp-core", "sp-keystore", - "subtle 2.2.3", + "subtle 2.4.0", "tempfile", ] [[package]] name = "sc-light" -version = "2.0.0" +version = "3.0.0" dependencies = [ "hash-db", "lazy_static", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-client-api", "sc-executor", "sp-api", @@ -7101,33 +7235,34 @@ dependencies = [ [[package]] name = "sc-network" -version = "0.8.0" +version = "0.9.0" dependencies = [ "assert_matches", "async-std", "async-trait", + "asynchronous-codec", "bitflags", "bs58", - "bytes 0.5.6", + "bytes 1.0.1", + "cid", "derive_more", "either", "erased-serde", "fnv", "fork-tree", - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", - "futures_codec", "hex", "ip_network", "libp2p", "linked-hash-map", "linked_hash_set", "log", - "lru 0.4.3", + "lru", "nohash-hasher", "parity-scale-codec", - "parking_lot 0.10.2", - "pin-project 0.4.22", + "parking_lot 0.11.1", + "pin-project 1.0.4", "prost", "prost-build", "quickcheck", @@ -7137,9 +7272,7 @@ dependencies = [ "sc-peerset", "serde", "serde_json", - "slog", - "slog_derive", - "smallvec 0.6.13", + "smallvec 1.6.1", "sp-arithmetic", "sp-blockchain", "sp-consensus", @@ -7154,7 +7287,7 @@ dependencies = [ "substrate-test-runtime-client", "tempfile", "thiserror", - "unsigned-varint 0.4.0", + "unsigned-varint 0.6.0", "void", "wasm-timer", "zeroize", @@ -7162,18 +7295,19 @@ dependencies = [ [[package]] name = "sc-network-gossip" -version = "0.8.0" +version = "0.9.0" dependencies = [ "async-std", - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", "libp2p", "log", - "lru 0.4.3", + "lru", "quickcheck", "rand 0.7.3", "sc-network", "sp-runtime", + "substrate-prometheus-endpoint", "substrate-test-runtime-client", "wasm-timer", ] @@ -7182,11 +7316,12 @@ dependencies = [ name = "sc-network-test" 
version = "0.8.0" dependencies = [ - "futures 0.3.5", + "async-std", + "futures 0.3.12", "futures-timer 3.0.2", "libp2p", "log", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -7206,26 +7341,28 @@ dependencies = [ [[package]] name = "sc-offchain" -version = "2.0.0" +version = "3.0.0" dependencies = [ "bytes 0.5.6", "fnv", - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", - "hyper 0.13.7", + "hyper 0.13.9", "hyper-rustls", "lazy_static", "log", "num_cpus", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "rand 0.7.3", + "sc-block-builder", "sc-client-api", "sc-client-db", "sc-keystore", "sc-network", "sc-transaction-pool", "sp-api", + "sp-consensus", "sp-core", "sp-offchain", "sp-runtime", @@ -7234,14 +7371,14 @@ dependencies = [ "sp-utils", "substrate-test-runtime-client", "threadpool", - "tokio 0.2.22", + "tokio 0.2.25", ] [[package]] name = "sc-peerset" -version = "2.0.0" +version = "3.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "libp2p", "log", "rand 0.7.3", @@ -7252,7 +7389,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" -version = "0.8.0" +version = "0.9.0" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -7260,24 +7397,26 @@ dependencies = [ [[package]] name = "sc-rpc" -version = "2.0.0" +version = "3.0.0" dependencies = [ "assert_matches", - "futures 0.1.29", - "futures 0.3.5", + "futures 0.1.30", + "futures 0.3.12", "hash-db", "jsonrpc-core", "jsonrpc-pubsub", "lazy_static", "log", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-block-builder", + "sc-cli", "sc-client-api", "sc-executor", "sc-keystore", "sc-network", "sc-rpc-api", + "sc-tracing", "sc-transaction-pool", "serde_json", "sp-api", @@ -7300,17 +7439,17 @@ dependencies = [ [[package]] name = "sc-rpc-api" -version = "0.8.0" +version = "0.9.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.12", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", "log", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "serde", "serde_json", "sp-chain-spec", @@ -7323,9 +7462,9 @@ dependencies = [ [[package]] name = "sc-rpc-server" -version = "2.0.0" +version = "3.0.0" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "jsonrpc-core", "jsonrpc-http-server", "jsonrpc-ipc-server", @@ -7349,19 +7488,18 @@ dependencies = [ "sp-sandbox", "sp-std", "sp-tasks", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] name = "sc-service" -version = "0.8.0" +version = "0.9.0" dependencies = [ "async-std", - "derive_more", "directories", "exit-future", - "futures 0.1.29", - "futures 0.3.5", + "futures 0.1.30", + "futures 0.3.12", "futures-timer 3.0.2", "hash-db", "jsonrpc-core", @@ -7370,8 +7508,8 @@ dependencies = [ "log", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.10.2", - "pin-project 0.4.22", + "parking_lot 0.11.1", + "pin-project 1.0.4", "rand 0.7.3", "sc-block-builder", "sc-chain-spec", @@ -7391,7 +7529,6 @@ dependencies = [ "sc-transaction-pool", "serde", "serde_json", - "slog", "sp-api", "sp-application-crypto", "sp-block-builder", @@ -7416,9 +7553,11 @@ dependencies = [ "substrate-test-runtime", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.22", + "thiserror", + "tokio 0.2.25", "tracing", "tracing-futures", + "tracing-subscriber", "wasm-timer", ] @@ -7427,12 +7566,12 @@ name = "sc-service-test" version = "2.0.0" dependencies = [ "fdlimit", - "futures 0.1.29", - "futures 0.3.5", 
+ "futures 0.1.30", + "futures 0.3.12", "hex-literal", "log", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", "sc-client-db", @@ -7460,20 +7599,21 @@ dependencies = [ [[package]] name = "sc-state-db" -version = "0.8.0" +version = "0.9.0" dependencies = [ "log", "parity-scale-codec", "parity-util-mem", "parity-util-mem-derive", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-client-api", "sp-core", + "thiserror", ] [[package]] name = "sc-sync-state-rpc" -version = "0.8.0" +version = "0.9.0" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -7487,59 +7627,80 @@ dependencies = [ "serde_json", "sp-blockchain", "sp-runtime", + "thiserror", ] [[package]] name = "sc-telemetry" -version = "2.0.0" +version = "3.0.0" dependencies = [ - "futures 0.3.5", - "futures-timer 3.0.2", + "chrono", + "futures 0.3.12", "libp2p", "log", - "parking_lot 0.10.2", - "pin-project 0.4.22", + "parking_lot 0.11.1", + "pin-project 1.0.4", "rand 0.7.3", "serde", - "slog", - "slog-json", - "slog-scope", + "serde_json", + "sp-utils", "take_mut", + "tracing", + "tracing-subscriber", "void", "wasm-timer", ] [[package]] name = "sc-tracing" -version = "2.0.0" +version = "3.0.0" dependencies = [ + "ansi_term 0.12.1", + "atty", "erased-serde", + "lazy_static", "log", - "parking_lot 0.10.2", + "once_cell", + "parking_lot 0.11.1", + "regex", "rustc-hash", "sc-telemetry", + "sc-tracing-proc-macro", "serde", "serde_json", - "slog", "sp-tracing", + "thiserror", "tracing", "tracing-core", + "tracing-log", "tracing-subscriber", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "sc-tracing-proc-macro" +version = "3.0.0" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "sc-transaction-graph" -version = "2.0.0" +version = "3.0.0" dependencies = [ "assert_matches", "criterion", "derive_more", - "futures 0.3.5", + "futures 0.3.12", "linked-hash-map", "log", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "retain_mut", "serde", "sp-blockchain", @@ -7548,23 +7709,23 @@ dependencies = [ "sp-transaction-pool", "sp-utils", "substrate-test-runtime", + "thiserror", "wasm-timer", ] [[package]] name = "sc-transaction-pool" -version = "2.0.0" +version = "3.0.0" dependencies = [ "assert_matches", - "derive_more", - "futures 0.3.5", + "futures 0.3.12", "futures-diagnose", "hex", "intervalier", "log", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", "sc-transaction-graph", @@ -7580,6 +7741,7 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", + "thiserror", "wasm-timer", ] @@ -7600,14 +7762,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" dependencies = [ "arrayref", - "arrayvec 0.5.1", - "curve25519-dalek 2.1.0", - "getrandom 0.1.14", + "arrayvec 0.5.2", + "curve25519-dalek 2.1.2", + "getrandom 0.1.16", "merlin", "rand 0.7.3", "rand_core 0.5.1", + "serde", "sha2 0.8.2", - "subtle 2.2.3", + "subtle 2.4.0", "zeroize", ] @@ -7617,12 +7780,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" -[[package]] -name = "scopeguard" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" - [[package]] name = "scopeguard" version = "1.1.0" @@ -7631,18 +7788,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scroll" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb2332cb595d33f7edd5700f4cbf94892e680c7f0ae56adab58a35190b66cb1" +checksum = "fda28d4b4830b807a8b43f7b0e6b5df875311b3e7621d84577188c175b6ec1ec" dependencies = [ "scroll_derive", ] [[package]] name = "scroll_derive" -version = "0.10.2" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e367622f934864ffa1c704ba2b82280aab856e3d8213c84c5720257eb34b15b9" +checksum = "aaaae8f38bb311444cfb7f1979af0bc9240d95795f75f9ceddf6a59b79ceffa0" dependencies = [ "proc-macro2", "quote", @@ -7661,9 +7818,9 @@ dependencies = [ [[package]] name = "secrecy" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9182278ed645df3477a9c27bfee0621c621aa16f6972635f7f795dae3d81070f" +checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" dependencies = [ "zeroize", ] @@ -7697,7 +7854,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", ] [[package]] @@ -7706,16 +7863,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", ] [[package]] name = "semver" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "394cec28fa623e00903caf7ba4fa6fb9a0e260280bb8cdbbba029611108a0190" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ - "semver-parser", + "semver-parser 0.10.2", "serde", ] @@ -7726,28 +7883,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] -name = "send_wrapper" -version = "0.2.0" +name = "semver-parser" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eddf2e8f50ced781f288c19f18621fa72a3779e3cb58dbf23b07469b0abeb4" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] [[package]] name = "send_wrapper" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686ef91cf020ad8d4aca9a7047641fd6add626b7b89e14546c2b6a76781cf822" +checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "send_wrapper" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" +checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" [[package]] name = "serde" -version = "1.0.116" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96fe57af81d28386a513cbc6858332abc6117cfdb5999647c6444b8f43a370a5" +checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" dependencies = [ "serde_derive", ] @@ 
-7764,9 +7924,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.116" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f630a6370fd8e457873b4bd2ffdae75408bc291ba72be773772a4c2a065d9ae8" +checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" dependencies = [ "proc-macro2", "quote", @@ -7775,9 +7935,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.58" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a230ea9107ca2220eea9d46de97eddcb04cd00e92d13dda78e478dd33fa82bd4" +checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" dependencies = [ "itoa", "ryu", @@ -7796,6 +7956,19 @@ dependencies = [ "opaque-debug 0.2.3", ] +[[package]] +name = "sha-1" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce3cdf1b5e620a498ee6f2a171885ac7e22f0e12089ec4b3d22b84921792507c" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpuid-bool 0.1.2", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + [[package]] name = "sha2" version = "0.8.2" @@ -7810,30 +7983,17 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2933378ddfeda7ea26f48c555bdad8bb446bf8a3d17832dc83e380d444cfb8c1" +checksum = "fa827a14b29ab7f44778d14a88d3cb76e949c45083f7dbfa507d0cb699dc12de" dependencies = [ "block-buffer 0.9.0", - "cfg-if", - "cpuid-bool", + "cfg-if 1.0.0", + "cpuid-bool 0.1.2", "digest 0.9.0", "opaque-debug 0.3.0", ] -[[package]] -name = "sha3" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf" -dependencies = [ - "block-buffer 0.7.3", - "byte-tools", - "digest 0.8.1", - "keccak", - "opaque-debug 0.2.3", -] - [[package]] name = "sha3" version = "0.9.1" @@ -7848,9 +8008,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.0.9" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06d5a3f5166fb5b42a5439f2eee8b9de149e235961e3eb21c5808fc3ea17ff3e" +checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" dependencies = [ "lazy_static", ] @@ -7862,91 +8022,68 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] -name = "signal-hook-registry" -version = "1.2.0" +name = "signal-hook" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" +checksum = "7e31d442c16f047a671b5a71e2161d6e68814012b7f5379d269ebd915fac2729" dependencies = [ - "arc-swap", "libc", + "signal-hook-registry", ] [[package]] -name = "signature" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65211b7b6fc3f14ff9fc7a2011a434e3e6880585bd2e9e9396315ae24cbf7852" - -[[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" - -[[package]] -name = "slog" -version = "2.5.2" +name = "signal-hook-registry" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1cc9c640a4adbfbcc11ffb95efe5aa7af7309e002adab54b185507dbf2377b99" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" dependencies = [ - "erased-serde", + "libc", ] [[package]] -name = "slog-json" -version = "2.3.0" +name = "signature" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc0d2aff1f8f325ef660d9a0eb6e6dcd20b30b3f581a5897f58bf42d061c37a" -dependencies = [ - "chrono", - "erased-serde", - "serde", - "serde_json", - "slog", -] +checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" [[package]] -name = "slog-scope" -version = "4.3.0" +name = "simba" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c44c89dd8b0ae4537d1ae318353eaf7840b4869c536e31c41e963d1ea523ee6" +checksum = "fb931b1367faadea6b1ab1c306a860ec17aaa5fa39f367d0c744e69d971a1fb2" dependencies = [ - "arc-swap", - "lazy_static", - "slog", + "approx", + "num-complex", + "num-traits", + "paste 0.1.18", ] [[package]] -name = "slog_derive" -version = "0.2.0" +name = "slab" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a945ec7f7ce853e89ffa36be1e27dce9a43e82ff9093bf3461c30d5da74ed11b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "smallvec" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" +checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" dependencies = [ "maybe-uninit", ] [[package]] name = "smallvec" -version = "1.4.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3757cb9d89161a2f24e1cf78efa0c1fcff485d18e3f55e0aa3480824ddaa0f3f" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "snow" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32bf8474159a95551661246cda4976e89356999e3cbfef36f493dacc3fae1e8e" +checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50" dependencies = [ "aes-gcm", "blake2", @@ -7955,53 +8092,52 @@ dependencies = [ "rand_core 0.5.1", "ring", "rustc_version", - "sha2 0.9.1", - "subtle 2.2.3", - "x25519-dalek 0.6.0", + "sha2 0.9.3", + "subtle 2.4.0", + "x25519-dalek", ] [[package]] name = "socket2" -version = "0.3.12" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", - "redox_syscall", "winapi 0.3.9", ] [[package]] name = "soketto" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85457366ae0c6ce56bf05a958aef14cd38513c236568618edbcd9a8c52cb80b0" +checksum = "b5c71ed3d54db0a699f4948e1bb3e45b450fa31fe602621dee6680361d569c88" dependencies = [ "base64 0.12.3", "bytes 0.5.6", "flate2", - "futures 0.3.5", + "futures 0.3.12", "httparse", "log", "rand 0.7.3", - "sha-1", + "sha-1 0.9.2", ] [[package]] name = "sp-allocator" -version = "2.0.0" +version = "3.0.0" dependencies = [ - "derive_more", "log", "sp-core", "sp-std", "sp-wasm-interface", + "thiserror", ] [[package]] 
name = "sp-api" -version = "2.0.0" +version = "3.0.0" dependencies = [ "hash-db", "parity-scale-codec", @@ -8012,11 +8148,12 @@ dependencies = [ "sp-std", "sp-test-primitives", "sp-version", + "thiserror", ] [[package]] name = "sp-api-proc-macro" -version = "2.0.0" +version = "3.0.0" dependencies = [ "blake2-rfc", "proc-macro-crate", @@ -8027,7 +8164,7 @@ dependencies = [ [[package]] name = "sp-api-test" -version = "2.0.0" +version = "2.0.1" dependencies = [ "criterion", "parity-scale-codec", @@ -8046,7 +8183,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", "serde", @@ -8069,7 +8206,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "2.0.0" +version = "3.0.0" dependencies = [ "criterion", "integer-sqrt", @@ -8096,7 +8233,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", "sp-api", @@ -8107,7 +8244,7 @@ dependencies = [ [[package]] name = "sp-authorship" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -8117,7 +8254,7 @@ dependencies = [ [[package]] name = "sp-block-builder" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", "sp-api", @@ -8128,13 +8265,14 @@ dependencies = [ [[package]] name = "sp-blockchain" -version = "2.0.0" +version = "3.0.0" dependencies = [ + "futures 0.3.12", "log", - "lru 0.4.3", + "lru", "parity-scale-codec", - "parking_lot 0.10.2", - "sp-block-builder", + "parking_lot 0.11.1", + "sp-api", "sp-consensus", "sp-database", "sp-runtime", @@ -8144,7 +8282,7 @@ dependencies = [ [[package]] name = "sp-chain-spec" -version = "2.0.0" +version = "3.0.0" dependencies = [ "serde", "serde_json", @@ -8152,14 +8290,14 @@ dependencies = [ [[package]] name = "sp-consensus" -version = "0.8.0" +version = "0.9.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "futures-timer 3.0.2", "libp2p", "log", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "serde", "sp-api", "sp-core", @@ -8178,11 +8316,12 @@ dependencies = [ [[package]] name = "sp-consensus-aura" -version = "0.8.0" +version = "0.9.0" dependencies = [ "parity-scale-codec", "sp-api", "sp-application-crypto", + "sp-consensus-slots", "sp-inherents", "sp-runtime", "sp-std", @@ -8191,7 +8330,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" -version = "0.8.0" +version = "0.9.0" dependencies = [ "merlin", "parity-scale-codec", @@ -8210,7 +8349,7 @@ dependencies = [ [[package]] name = "sp-consensus-pow" -version = "0.8.0" +version = "0.9.0" dependencies = [ "parity-scale-codec", "sp-api", @@ -8221,15 +8360,16 @@ dependencies = [ [[package]] name = "sp-consensus-slots" -version = "0.8.0" +version = "0.9.0" dependencies = [ "parity-scale-codec", + "sp-arithmetic", "sp-runtime", ] [[package]] name = "sp-consensus-vrf" -version = "0.8.0" +version = "0.9.0" dependencies = [ "parity-scale-codec", "schnorrkel", @@ -8240,7 +8380,7 @@ dependencies = [ [[package]] name = "sp-core" -version = "2.0.0" +version = "3.0.0" dependencies = [ "base58", "blake2-rfc", @@ -8248,7 +8388,7 @@ dependencies = [ "criterion", "dyn-clonable", "ed25519-dalek", - "futures 0.3.5", + "futures 0.3.12", "hash-db", "hash256-std-hasher", "hex", @@ -8261,7 +8401,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "pretty_assertions", "primitive-types", "rand 0.7.3", @@ -8271,7 +8411,7 @@ 
dependencies = [ "secrecy", "serde", "serde_json", - "sha2 0.8.2", + "sha2 0.9.3", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", @@ -8289,24 +8429,35 @@ dependencies = [ [[package]] name = "sp-database" -version = "2.0.0" +version = "3.0.0" dependencies = [ "kvdb", - "parking_lot 0.10.2", + "parking_lot 0.11.1", ] [[package]] name = "sp-debug-derive" -version = "2.0.0" +version = "3.0.0" dependencies = [ "proc-macro2", "quote", "syn", ] +[[package]] +name = "sp-election-providers" +version = "3.0.0" +dependencies = [ + "parity-scale-codec", + "sp-arithmetic", + "sp-npos-elections", + "sp-runtime", + "sp-std", +] + [[package]] name = "sp-externalities" -version = "0.8.0" +version = "0.9.0" dependencies = [ "environmental", "parity-scale-codec", @@ -8316,7 +8467,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" -version = "2.0.0" +version = "3.0.0" dependencies = [ "finality-grandpa", "log", @@ -8332,10 +8483,10 @@ dependencies = [ [[package]] name = "sp-inherents" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sp-core", "sp-std", "thiserror", @@ -8343,14 +8494,14 @@ dependencies = [ [[package]] name = "sp-io" -version = "2.0.0" +version = "3.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "hash-db", "libsecp256k1", "log", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sp-core", "sp-externalities", "sp-keystore", @@ -8366,7 +8517,7 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "2.0.0" +version = "3.0.0" dependencies = [ "lazy_static", "sp-core", @@ -8376,29 +8527,31 @@ dependencies = [ [[package]] name = "sp-keystore" -version = "0.8.0" +version = "0.9.0" dependencies = [ "async-trait", "derive_more", - "futures 0.3.5", + "futures 0.3.12", "merlin", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "rand 0.7.3", "rand_chacha 0.2.2", "schnorrkel", + "serde", "sp-core", "sp-externalities", ] [[package]] name = "sp-npos-elections" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", "rand 0.7.3", "serde", "sp-arithmetic", + "sp-core", "sp-npos-elections-compact", "sp-runtime", "sp-std", @@ -8407,7 +8560,7 @@ dependencies = [ [[package]] name = "sp-npos-elections-compact" -version = "2.0.0" +version = "3.0.0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -8429,7 +8582,7 @@ dependencies = [ [[package]] name = "sp-offchain" -version = "2.0.0" +version = "3.0.0" dependencies = [ "sp-api", "sp-core", @@ -8439,15 +8592,14 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "2.0.0" +version = "3.0.0" dependencies = [ "backtrace", - "log", ] [[package]] name = "sp-rpc" -version = "2.0.0" +version = "3.0.0" dependencies = [ "serde", "serde_json", @@ -8456,7 +8608,7 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "2.0.0" +version = "3.0.0" dependencies = [ "either", "hash256-std-hasher", @@ -8464,14 +8616,13 @@ dependencies = [ "log", "parity-scale-codec", "parity-util-mem", - "paste 0.1.18", + "paste 1.0.4", "rand 0.7.3", "serde", "serde_json", "sp-application-crypto", "sp-arithmetic", "sp-core", - "sp-inherents", "sp-io", "sp-state-machine", "sp-std", @@ -8479,8 +8630,9 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "2.0.0" +version = "3.0.0" dependencies = [ + "impl-trait-for-tuples", "parity-scale-codec", "primitive-types", "rustversion", @@ -8500,7 +8652,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" 
-version = "2.0.0" +version = "3.0.0" dependencies = [ "Inflector", "proc-macro-crate", @@ -8533,7 +8685,7 @@ dependencies = [ "sp-io", "sp-runtime-interface", "sp-std", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] @@ -8544,12 +8696,12 @@ dependencies = [ "sp-io", "sp-runtime-interface", "sp-std", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] name = "sp-sandbox" -version = "0.8.0" +version = "0.9.0" dependencies = [ "assert_matches", "parity-scale-codec", @@ -8563,7 +8715,7 @@ dependencies = [ [[package]] name = "sp-serializer" -version = "2.0.0" +version = "3.0.0" dependencies = [ "serde", "serde_json", @@ -8571,7 +8723,7 @@ dependencies = [ [[package]] name = "sp-session" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", "sp-api", @@ -8583,7 +8735,7 @@ dependencies = [ [[package]] name = "sp-staking" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -8592,17 +8744,17 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.8.0" +version = "0.9.0" dependencies = [ "hash-db", "hex-literal", "log", "num-traits", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "pretty_assertions", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.6.1", "sp-core", "sp-externalities", "sp-panic-handler", @@ -8616,11 +8768,11 @@ dependencies = [ [[package]] name = "sp-std" -version = "2.0.0" +version = "3.0.0" [[package]] name = "sp-storage" -version = "2.0.0" +version = "3.0.0" dependencies = [ "impl-serde", "parity-scale-codec", @@ -8632,7 +8784,7 @@ dependencies = [ [[package]] name = "sp-tasks" -version = "2.0.0" +version = "3.0.0" dependencies = [ "log", "parity-scale-codec", @@ -8657,7 +8809,7 @@ dependencies = [ [[package]] name = "sp-timestamp" -version = "2.0.0" +version = "3.0.0" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8670,7 +8822,7 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "2.0.0" +version = "3.0.0" dependencies = [ "log", "parity-scale-codec", @@ -8682,21 +8834,22 @@ dependencies = [ [[package]] name = "sp-transaction-pool" -version = "2.0.0" +version = "3.0.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.12", "log", "parity-scale-codec", "serde", "sp-api", "sp-blockchain", "sp-runtime", + "thiserror", ] [[package]] name = "sp-trie" -version = "2.0.0" +version = "3.0.0" dependencies = [ "criterion", "hash-db", @@ -8714,9 +8867,9 @@ dependencies = [ [[package]] name = "sp-utils" -version = "2.0.0" +version = "3.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "futures-core", "futures-timer 3.0.2", "lazy_static", @@ -8725,7 +8878,7 @@ dependencies = [ [[package]] name = "sp-version" -version = "2.0.0" +version = "3.0.0" dependencies = [ "impl-serde", "parity-scale-codec", @@ -8736,7 +8889,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" -version = "2.0.0" +version = "3.0.0" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8764,20 +8917,11 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "statrs" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10102ac8d55e35db2b3fafc26f81ba8647da2e15879ab686a67e6d19af2685e8" -dependencies = [ - "rand 0.5.6", -] - -[[package]] -name = "stream-cipher" -version = "0.4.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"09f8ed9974042b8c3672ff3030a69fcc03b74c47c3d1ecb7755e8a3626011e88" +checksum = "cce16f6de653e88beca7bd13780d08e09d4489dbca1f9210e041bc4852481382" dependencies = [ - "generic-array 0.14.3", + "rand 0.7.3", ] [[package]] @@ -8786,8 +8930,8 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89" dependencies = [ - "block-cipher 0.8.0", - "generic-array 0.14.3", + "block-cipher", + "generic-array 0.14.4", ] [[package]] @@ -8807,9 +8951,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.15" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de2f5e239ee807089b62adce73e48c625e0ed80df02c7ab3f068f5db5281065c" +checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" dependencies = [ "clap", "lazy_static", @@ -8818,9 +8962,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.8" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510413f9de616762a4fbeab62509bf15c729603b72d7cd71280fbca431b1c118" +checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" dependencies = [ "heck", "proc-macro-error", @@ -8831,18 +8975,18 @@ dependencies = [ [[package]] name = "strum" -version = "0.16.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6138f8f88a16d90134763314e3fc76fa3ed6a7db4725d6acf9a3ef95a3188d22" +checksum = "7318c509b5ba57f18533982607f24070a55d353e90d4cae30c467cdb2ad5ac5c" dependencies = [ "strum_macros", ] [[package]] name = "strum_macros" -version = "0.16.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0054a7df764039a6cd8592b9de84be4bec368ff081d203a7d5371cbfa8e65c81" +checksum = "ee8bc6b87a5112aeeab1f4a9f7ab634fe6cbefc4850006df31267f4cfb9e3149" dependencies = [ "heck", "proc-macro2", @@ -8854,13 +8998,8 @@ dependencies = [ name = "subkey" version = "2.0.0" dependencies = [ - "frame-system", - "node-primitives", - "node-runtime", "sc-cli", - "sp-core", "structopt", - "substrate-frame-cli", ] [[package]] @@ -8869,8 +9008,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" dependencies = [ - "hmac", - "pbkdf2", + "hmac 0.7.1", + "pbkdf2 0.3.0", "schnorrkel", "sha2 0.8.2", "zeroize", @@ -8878,24 +9017,25 @@ dependencies = [ [[package]] name = "substrate-browser-utils" -version = "0.8.0" +version = "0.9.0" dependencies = [ "chrono", "console_error_panic_hook", - "console_log", - "futures 0.1.29", - "futures 0.3.5", + "futures 0.1.30", + "futures 0.3.12", "futures-timer 3.0.2", + "getrandom 0.2.2", "js-sys", "kvdb-web", "libp2p-wasm-ext", "log", - "rand 0.6.5", "rand 0.7.3", "sc-chain-spec", "sc-informant", "sc-network", "sc-service", + "sc-telemetry", + "sc-tracing", "sp-database", "wasm-bindgen", "wasm-bindgen-futures", @@ -8903,14 +9043,14 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" -version = "2.0.0" +version = "3.0.0" dependencies = [ "platforms", ] [[package]] name = "substrate-frame-cli" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-system", "sc-cli", @@ -8921,26 +9061,26 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-support" -version = "2.0.0" +version = "3.0.0" dependencies = [ 
"frame-support", "frame-system", - "futures 0.3.5", + "futures 0.3.12", "jsonrpc-client-transports", "jsonrpc-core", "parity-scale-codec", "sc-rpc-api", "serde", "sp-storage", - "tokio 0.2.22", + "tokio 0.2.25", ] [[package]] name = "substrate-frame-rpc-system" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-system-rpc-runtime-api", - "futures 0.3.5", + "futures 0.3.12", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -8962,23 +9102,23 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" -version = "0.8.0" +version = "0.9.0" dependencies = [ "async-std", "derive_more", "futures-util", - "hyper 0.13.7", + "hyper 0.13.9", "log", "prometheus", - "tokio 0.2.22", + "tokio 0.2.25", ] [[package]] name = "substrate-test-client" -version = "2.0.0" +version = "2.0.1" dependencies = [ - "futures 0.1.29", - "futures 0.3.5", + "futures 0.1.30", + "futures 0.3.12", "hash-db", "hex", "parity-scale-codec", @@ -9003,7 +9143,7 @@ dependencies = [ name = "substrate-test-runtime" version = "2.0.0" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "frame-executive", "frame-support", "frame-system", @@ -9039,7 +9179,7 @@ dependencies = [ "sp-trie", "sp-version", "substrate-test-runtime-client", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", "trie-db", ] @@ -9047,7 +9187,7 @@ dependencies = [ name = "substrate-test-runtime-client" version = "2.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -9068,9 +9208,9 @@ name = "substrate-test-runtime-transaction-pool" version = "2.0.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.12", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "sc-transaction-graph", "sp-blockchain", "sp-runtime", @@ -9080,18 +9220,18 @@ dependencies = [ [[package]] name = "substrate-test-utils" -version = "2.0.0" +version = "3.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "sc-service", "substrate-test-utils-derive", - "tokio 0.2.22", + "tokio 0.2.25", "trybuild", ] [[package]] name = "substrate-test-utils-derive" -version = "0.8.0" +version = "0.9.0" dependencies = [ "proc-macro-crate", "quote", @@ -9104,29 +9244,23 @@ version = "0.1.0" dependencies = [ "sc-service", "substrate-test-utils", - "tokio 0.2.22", + "tokio 0.2.25", ] [[package]] name = "substrate-wasm-builder" -version = "2.0.1" +version = "4.0.0" dependencies = [ "ansi_term 0.12.1", "atty", "build-helper", "cargo_metadata", - "fs2", - "itertools 0.8.2", "tempfile", "toml", "walkdir", "wasm-gc-api", ] -[[package]] -name = "substrate-wasm-builder-runner" -version = "2.0.0" - [[package]] name = "subtle" version = "1.0.0" @@ -9135,32 +9269,21 @@ checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" [[package]] name = "subtle" -version = "2.2.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" +checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.44" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e03e57e4fcbfe7749842d53e24ccb9aa12b7252dbe5e91d2acad31834c8b8fdd" +checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2", "quote", "unicode-xid", ] -[[package]] -name = "syn-mid" -version = "0.5.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "synstructure" version = "0.12.4" @@ -9179,48 +9302,41 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" +[[package]] +name = "tap" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36474e732d1affd3a6ed582781b3683df3d0563714c59c39591e8ff707cf078e" + [[package]] name = "target-lexicon" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab0e7238dcc7b40a7be719a25365910f6807bd864f4cce6b2e6b873658e2b19d" +checksum = "4ee5a98e506fb7231a304c3a1bd7c132a55016cf65001e0282480665870dfcb9" [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand 0.8.3", + "redox_syscall 0.2.4", "remove_dir_all", "winapi 0.3.9", ] [[package]] name = "termcolor" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" dependencies = [ "winapi-util", ] -[[package]] -name = "test-case" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a605baa797821796a751f4a959e1206079b24a4b7e1ed302b7d785d81a9276c9" -dependencies = [ - "lazy_static", - "proc-macro2", - "quote", - "syn", - "version_check", -] - [[package]] name = "textwrap" version = "0.11.0" @@ -9232,18 +9348,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "318234ffa22e0920fe9a40d7b8369b5f649d490980cf7aadcf1eb91594869b42" +checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cae2447b6282786c3493999f40a9be2a6ad20cb8bd268b0a0dbf5a065535c0ab" +checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" dependencies = [ "proc-macro2", "quote", @@ -9252,11 +9368,11 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "d8208a331e1cb318dd5bd76951d2b8fc48ca38a69f5f4e4af1b6a9f8c6236915" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -9270,28 +9386,31 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 
0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "tiny-bip39" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0165e045cc2ae1660270ca65e1676dbaab60feb0f91b10f7d0665e9b47e31f2" +checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" dependencies = [ - "failure", - "hmac", - "once_cell 1.4.1", - "pbkdf2", + "anyhow", + "hmac 0.8.1", + "once_cell", + "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.8.2", + "sha2 0.9.3", + "thiserror", "unicode-normalization", + "zeroize", ] [[package]] @@ -9305,9 +9424,9 @@ dependencies = [ [[package]] name = "tinytemplate" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d3dc76004a03cec1c5932bca4cdc2e39aaa798e3f82363dd94f9adf6098c12f" +checksum = "a2ada8616fad06a2d0c455adc530de4ef57605a8120cc65da9653e0e9623ca74" dependencies = [ "serde", "serde_json", @@ -9315,9 +9434,18 @@ dependencies = [ [[package]] name = "tinyvec" -version = "0.3.3" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" @@ -9326,16 +9454,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "mio", "num_cpus", "tokio-codec", "tokio-current-thread", - "tokio-executor 0.1.10", + "tokio-executor", "tokio-fs", "tokio-io", "tokio-reactor", - "tokio-sync 0.1.8", + "tokio-sync", "tokio-tcp", "tokio-threadpool", "tokio-timer", @@ -9345,9 +9473,9 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.22" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d34ca54d84bf2b5b4d7d31e901a8464f7b60ac145a284fba25ceb801f2ddccd" +checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" dependencies = [ "bytes 0.5.6", "fnv", @@ -9359,7 +9487,7 @@ dependencies = [ "mio", "mio-uds", "num_cpus", - "pin-project-lite", + "pin-project-lite 0.1.11", "signal-hook-registry", "slab", "tokio-macros", @@ -9374,7 +9502,7 @@ checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" dependencies = [ "bytes 0.4.12", "either", - "futures 0.1.29", + "futures 0.1.30", ] [[package]] @@ -9384,7 +9512,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "tokio-io", ] @@ -9394,8 +9522,8 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" dependencies = [ - "futures 0.1.29", - "tokio-executor 0.1.10", + "futures 0.1.30", + "tokio-executor", ] [[package]] @@ -9404,19 +9532,8 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" dependencies = [ - 
"crossbeam-utils", - "futures 0.1.29", -] - -[[package]] -name = "tokio-executor" -version = "0.2.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ee9ceecf69145923834ea73f32ba40c790fd877b74a7817dd0b089f1eb9c7c8" -dependencies = [ - "futures-util-preview", - "lazy_static", - "tokio-sync 0.2.0-alpha.6", + "crossbeam-utils 0.7.2", + "futures 0.1.30", ] [[package]] @@ -9425,7 +9542,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "tokio-io", "tokio-threadpool", ] @@ -9437,15 +9554,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "log", ] [[package]] name = "tokio-macros" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2", "quote", @@ -9459,7 +9576,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d282d483052288b2308ba5ee795f5673b159c9bdf63c385a05609da782a5eae" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "mio", "mio-named-pipes", "tokio 0.1.22", @@ -9471,28 +9588,28 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" dependencies = [ - "crossbeam-utils", - "futures 0.1.29", + "crossbeam-utils 0.7.2", + "futures 0.1.30", "lazy_static", "log", "mio", "num_cpus", "parking_lot 0.9.0", "slab", - "tokio-executor 0.1.10", + "tokio-executor", "tokio-io", - "tokio-sync 0.1.8", + "tokio-sync", ] [[package]] name = "tokio-rustls" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228139ddd4fea3fa345a29233009635235833e52807af7ea6448ead03890d6a9" +checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" dependencies = [ "futures-core", - "rustls", - "tokio 0.2.22", + "rustls 0.18.1", + "tokio 0.2.25", "webpki", ] @@ -9502,7 +9619,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", ] [[package]] @@ -9512,18 +9629,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" dependencies = [ "fnv", - "futures 0.1.29", -] - -[[package]] -name = "tokio-sync" -version = "0.2.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1aaeb685540f7407ea0e27f1c9757d258c7c6bf4e3eb19da6fc59b747239d2" -dependencies = [ - "fnv", - "futures-core-preview", - "futures-util-preview", + "futures 0.1.30", ] [[package]] @@ -9533,7 +9639,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "iovec", "mio", "tokio-io", @@ -9546,15 +9652,15 @@ version = "0.1.18" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" dependencies = [ - "crossbeam-deque", + "crossbeam-deque 0.7.3", "crossbeam-queue", - "crossbeam-utils", - "futures 0.1.29", + "crossbeam-utils 0.7.2", + "futures 0.1.30", "lazy_static", "log", "num_cpus", "slab", - "tokio-executor 0.1.10", + "tokio-executor", ] [[package]] @@ -9563,10 +9669,10 @@ version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" dependencies = [ - "crossbeam-utils", - "futures 0.1.29", + "crossbeam-utils 0.7.2", + "futures 0.1.30", "slab", - "tokio-executor 0.1.10", + "tokio-executor", ] [[package]] @@ -9576,7 +9682,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "log", "mio", "tokio-codec", @@ -9591,7 +9697,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "iovec", "libc", "log", @@ -9612,34 +9718,34 @@ dependencies = [ "futures-core", "futures-sink", "log", - "pin-project-lite", - "tokio 0.2.22", + "pin-project-lite 0.1.11", + "tokio 0.2.25", ] [[package]] name = "toml" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" +checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "log", - "pin-project-lite", + "pin-project-lite 0.2.4", "tracing-attributes", "tracing-core", ] @@ -9670,7 +9776,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" dependencies = [ - "pin-project 0.4.22", + "pin-project 0.4.27", "tracing", ] @@ -9697,9 +9803,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ef0a5e15477aa303afbfac3a44cba9b6430fdaad52423b1e6c0dbbe28c3eedd" +checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" dependencies = [ "ansi_term 0.12.1", "chrono", @@ -9709,7 +9815,7 @@ dependencies = [ "serde", "serde_json", "sharded-slab", - "smallvec 1.4.1", + "smallvec 1.6.1", "thread_local", "tracing", "tracing-core", @@ -9725,9 +9831,9 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.25.0" +version = "0.27.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "af2cc37cac8cc158119982c920cbb9b8243d8540c1d13b8aca84484bfc83a426" +checksum = "568257edb909a5c532b1f4ab38ee6b5dedfbf8775be6a55a29020513ebe3e072" dependencies = [ "criterion", "hash-db", @@ -9741,15 +9847,15 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.22.0" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f1a9a9252d38c5337cf0c5392988821a5cf1b2103245016968f2ab41de9e38" +checksum = "ec051edf7f0fc9499a2cb0947652cab2148b9d7f61cee7605e312e9f970dacaf" dependencies = [ "hash-db", - "hashbrown 0.8.1", + "hashbrown", "log", "rustc-hex", - "smallvec 1.4.1", + "smallvec 1.6.1", ] [[package]] @@ -9779,9 +9885,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "trybuild" -version = "1.0.35" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7d30fe369fd650072b352b1a9cb9587669de6b89be3b8225544012c1c45292d" +checksum = "1c9594b802f041389d2baac680663573dde3103bb4a4926d61d6aba689465978" dependencies = [ "dissimilar", "glob", @@ -9794,11 +9900,13 @@ dependencies = [ [[package]] name = "twox-hash" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56" +checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" dependencies = [ - "rand 0.3.23", + "cfg-if 0.1.10", + "rand 0.7.3", + "static_assertions", ] [[package]] @@ -9815,13 +9923,13 @@ checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" [[package]] name = "uint" -version = "0.8.3" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "173cd16430c206dc1a430af8a89a0e9c076cf15cb42b4aedb10e8cc8fee73681" +checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e" dependencies = [ "byteorder", "crunchy", - "rustc-hex", + "hex", "static_assertions", ] @@ -9845,18 +9953,18 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.13" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" +checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" [[package]] name = "unicode-width" @@ -9876,38 +9984,26 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ - "generic-array 0.14.3", - "subtle 2.2.3", + "generic-array 0.14.4", + "subtle 2.4.0", ] [[package]] name = "unsigned-varint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f67332660eb59a6f1eb24ff1220c9e8d01738a8503c6002e30bcfe4bd9f2b4a9" - -[[package]] -name = "unsigned-varint" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5" -dependencies = [ - "bytes 0.5.6", - 
"futures-io", - "futures-util", - "futures_codec", -] +checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" [[package]] name = "unsigned-varint" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" +checksum = "35581ff83d4101e58b582e607120c7f5ffb17e632a980b1f38334d76b36908b2" dependencies = [ - "bytes 0.5.6", + "asynchronous-codec", + "bytes 1.0.1", "futures-io", "futures-util", - "futures_codec", ] [[package]] @@ -9929,20 +10025,30 @@ dependencies = [ [[package]] name = "url" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" dependencies = [ + "form_urlencoded", "idna 0.2.0", "matches", "percent-encoding 2.1.0", ] +[[package]] +name = "value-bag" +version = "1.0.0-alpha.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b676010e055c99033117c2343b33a40a30b91fecd6c49055ac9cd2d6c305ab1" +dependencies = [ + "ctor", +] + [[package]] name = "vcpkg" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "vec-arena" @@ -9979,9 +10085,9 @@ dependencies = [ [[package]] name = "waker-fn" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9571542c2ce85ce642e6b58b3364da2fb53526360dfb7c211add4f5c23105ff7" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" @@ -10000,7 +10106,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "log", "try-lock", ] @@ -10021,13 +10127,19 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" -version = "0.2.67" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c" +checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "serde", "serde_json", "wasm-bindgen-macro", @@ -10035,9 +10147,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.67" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc71e4c5efa60fb9e74160e89b93353bc24059999c0ae0fb03affc39770310b0" +checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" dependencies = [ "bumpalo", "lazy_static", @@ -10050,11 +10162,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.12" +version = "0.4.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a369c5e1dfb7569e14d62af4da642a3cbc2f9a3652fe586e26ac22222aa4b04" +checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", @@ -10062,9 +10174,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.67" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97c57cefa5fa80e2ba15641578b44d36e7a64279bc5ed43c6dbaf329457a2ed2" +checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10072,9 +10184,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.67" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841a6d1c35c6f596ccea1f82504a192a60378f64b3bb0261904ad8f2f5657556" +checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" dependencies = [ "proc-macro2", "quote", @@ -10085,15 +10197,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.67" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092" +checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" [[package]] name = "wasm-bindgen-test" -version = "0.3.12" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8e9dad8040e378f0696b017570c6bc929aac373180e06b3d67ac5059c52da3" +checksum = "0355fa0c1f9b792a09b6dcb6a8be24d51e71e6d74972f9eb4a44c4c004d24a25" dependencies = [ "console_error_panic_hook", "js-sys", @@ -10105,9 +10217,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.12" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c358c8d2507c1bae25efa069e62ea907aa28700b25c8c33dafb0b15ba4603627" +checksum = "27e07b46b98024c2ba2f9e83a10c2ef0515f057f2da299c1762a2017de80438b" dependencies = [ "proc-macro2", "quote", @@ -10126,15 +10238,14 @@ dependencies = [ [[package]] name = "wasm-timer" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324c5e65a08699c9c4334ba136597ab22b85dccd4b65dd1e36ccf8f723a95b54" +checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "js-sys", - "parking_lot 0.9.0", + "parking_lot 0.11.1", "pin-utils", - "send_wrapper 0.2.0", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -10166,33 +10277,31 @@ dependencies = [ [[package]] name = "wasmparser" -version = "0.57.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32fddd575d477c6e9702484139cf9f23dcd554b06d185ed0f56c857dd3a47aa6" - -[[package]] -name = "wasmparser" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a950e6a618f62147fd514ff445b2a0b53120d382751960797f85f058c7eda9b9" +checksum = "89a30c99437829ede826802bfcf28500cf58df00e66cb9114df98813bc145ff1" [[package]] name = "wasmtime" -version = "0.19.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd3c4f449382779ef6e0a7c3ec6752ae614e20a42e4100000c3efdc973100e2" +checksum = "7426055cb92bd9a1e9469b48154d8d6119cd8c498c8b70284e420342c05dc45d" 
dependencies = [ "anyhow", "backtrace", - "cfg-if", - "lazy_static", + "bincode", + "cfg-if 1.0.0", + "cpp_demangle", + "indexmap", "libc", "log", "region", "rustc-demangle", - "smallvec 1.4.1", + "serde", + "smallvec 1.6.1", "target-lexicon", - "wasmparser 0.59.0", + "wasmparser", + "wasmtime-cache", "wasmtime-environ", "wasmtime-jit", "wasmtime-profiling", @@ -10201,74 +10310,101 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "wasmtime-cache" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c01d9287e36921e46f5887a47007824ae5dbb9b7517a2d565660ab4471478709" +dependencies = [ + "anyhow", + "base64 0.13.0", + "bincode", + "directories-next", + "errno", + "file-per-thread-logger", + "libc", + "log", + "serde", + "sha2 0.9.3", + "toml", + "winapi 0.3.9", + "zstd", +] + +[[package]] +name = "wasmtime-cranelift" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4134ed3a4316cd0de0e546c6004850afe472b0fa3fcdc2f2c15f8d449562d962" +dependencies = [ + "cranelift-codegen", + "cranelift-entity", + "cranelift-frontend", + "cranelift-wasm", + "wasmtime-environ", +] + [[package]] name = "wasmtime-debug" -version = "0.19.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e634af9067a3af6cf2c7d33dc3b84767ddaf5d010ba68e80eecbcea73d4a349" +checksum = "e91fa931df6dd8af2b02606307674d3bad23f55473d5f4c809dddf7e4c4dc411" dependencies = [ "anyhow", - "gimli 0.21.0", + "gimli", "more-asserts", - "object 0.20.0", + "object 0.22.0", "target-lexicon", "thiserror", - "wasmparser 0.59.0", + "wasmparser", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "0.19.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f85619a94ee4034bd5bb87fc3dcf71fd2237b81c840809da1201061eec9ab3" +checksum = "a1098871dc3120aaf8190d79153e470658bb79f63ee9ca31716711e123c28220" dependencies = [ "anyhow", - "base64 0.12.3", - "bincode", - "cfg-if", + "cfg-if 1.0.0", "cranelift-codegen", "cranelift-entity", - "cranelift-frontend", "cranelift-wasm", - "directories", - "errno", - "file-per-thread-logger", + "gimli", "indexmap", - "libc", "log", "more-asserts", - "rayon", "serde", - "sha2 0.8.2", "thiserror", - "toml", - "wasmparser 0.59.0", - "winapi 0.3.9", - "zstd", + "wasmparser", ] [[package]] name = "wasmtime-jit" -version = "0.19.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e914c013c7a9f15f4e429d5431f2830fb8adb56e40567661b69c5ec1d645be23" +checksum = "738bfcd1561ede8bb174215776fd7d9a95d5f0a47ca3deabe0282c55f9a89f68" dependencies = [ + "addr2line", "anyhow", - "cfg-if", + "cfg-if 1.0.0", "cranelift-codegen", "cranelift-entity", "cranelift-frontend", "cranelift-native", "cranelift-wasm", - "gimli 0.21.0", + "gimli", "log", "more-asserts", - "object 0.20.0", + "object 0.22.0", + "rayon", "region", + "serde", "target-lexicon", "thiserror", - "wasmparser 0.59.0", + "wasmparser", + "wasmtime-cranelift", "wasmtime-debug", "wasmtime-environ", "wasmtime-obj", @@ -10279,13 +10415,13 @@ dependencies = [ [[package]] name = "wasmtime-obj" -version = "0.19.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e81d8e02e9bc9fe2da9b6d48bbc217f96e089f7df613f11a28a3958abc44641e" +checksum = "3e96d77f1801131c5e86d93e42a3cf8a35402107332c202c245c83f34888a906" dependencies = [ "anyhow", "more-asserts", - "object 0.20.0", + "object 0.22.0", 
"target-lexicon", "wasmtime-debug", "wasmtime-environ", @@ -10293,16 +10429,16 @@ dependencies = [ [[package]] name = "wasmtime-profiling" -version = "0.19.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8d4d1af8dd5f7096cfcc89dd668d358e52980c38cce199643372ffd6590e27" +checksum = "60bb672c9d894776d7b9250dd9b4fe890f8760201ee4f53e5f2da772b6c4debb" dependencies = [ "anyhow", - "cfg-if", - "gimli 0.21.0", + "cfg-if 1.0.0", + "gimli", "lazy_static", "libc", - "object 0.19.0", + "object 0.22.0", "scroll", "serde", "target-lexicon", @@ -10312,19 +10448,20 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "0.19.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a25f140bbbaadb07c531cba99ce1a966dba216138dc1b2a0ddecec851a01a93" +checksum = "a978086740949eeedfefcee667b57a9e98d9a7fc0de382fcfa0da30369e3530d" dependencies = [ "backtrace", "cc", - "cfg-if", + "cfg-if 1.0.0", "indexmap", "lazy_static", "libc", "log", - "memoffset", + "memoffset 0.6.1", "more-asserts", + "psm", "region", "thiserror", "wasmtime-environ", @@ -10333,27 +10470,27 @@ dependencies = [ [[package]] name = "wast" -version = "21.0.0" +version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b1844f66a2bc8526d71690104c0e78a8e59ffa1597b7245769d174ebb91deb5" +checksum = "c24a3ee360d01d60ed0a0f960ab76a6acce64348cdb0bf8699c2a866fad57c7c" dependencies = [ "leb128", ] [[package]] name = "wat" -version = "1.0.22" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce85d72b74242c340e9e3492cfb602652d7bb324c3172dd441b5577e39a2e18c" +checksum = "5e8f7f34773fa6318e8897283abf7941c1f250faae4e1a52f82df09c3bad7cce" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.39" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bc359e5dd3b46cb9687a051d50a2fdd228e4ba7cf6fcf861a5365c3d671a642" +checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" dependencies = [ "js-sys", "wasm-bindgen", @@ -10361,9 +10498,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.3" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ "ring", "untrusted", @@ -10371,18 +10508,18 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f20dea7535251981a9670857150d571846545088359b28e4951d350bdaf179f" +checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" dependencies = [ "webpki", ] [[package]] -name = "wepoll-sys-stjepang" -version = "1.0.6" +name = "wepoll-sys" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd319e971980166b53e17b1026812ad66c6b54063be879eb182342b55284694" +checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" dependencies = [ "cc", ] @@ -10396,6 +10533,16 @@ dependencies = [ "libc", ] +[[package]] +name = "which" +version = "4.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87c14ef7e1b8b8ecfc75d5eca37949410046e66f15d185c01d70824f1f8111ef" +dependencies = [ + "libc", + "thiserror", +] + [[package]] name = 
"winapi" version = "0.2.8" @@ -10450,15 +10597,10 @@ dependencies = [ ] [[package]] -name = "x25519-dalek" -version = "0.6.0" +name = "wyz" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637ff90c9540fa3073bb577e65033069e4bae7c79d49d74aa3ffdf5342a53217" -dependencies = [ - "curve25519-dalek 2.1.0", - "rand_core 0.5.1", - "zeroize", -] +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" [[package]] name = "x25519-dalek" @@ -10466,7 +10608,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc614d95359fd7afc321b66d2107ede58b246b844cf5d8a0adcca413e439f088" dependencies = [ - "curve25519-dalek 3.0.0", + "curve25519-dalek 3.0.2", "rand_core 0.5.1", "zeroize", ] @@ -10477,28 +10619,28 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aeb8c4043cac71c3c299dff107171c220d179492350ea198e109a414981b83c" dependencies = [ - "futures 0.3.5", + "futures 0.3.12", "log", "nohash-hasher", - "parking_lot 0.11.0", + "parking_lot 0.11.1", "rand 0.7.3", "static_assertions", ] [[package]] name = "zeroize" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" +checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" +checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" dependencies = [ "proc-macro2", "quote", @@ -10508,18 +10650,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.5.3+zstd.1.4.5" +version = "0.5.4+zstd.1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8" +checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "2.0.5+zstd.1.4.5" +version = "2.0.6+zstd.1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055" +checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" dependencies = [ "libc", "zstd-sys", @@ -10527,9 +10669,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.17+zstd.1.4.5" +version = "1.4.18+zstd.1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b" +checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" dependencies = [ "cc", "glob", diff --git a/Cargo.toml b/Cargo.toml index b78c4da055801..38b3a2bdcf296 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,19 +1,19 @@ [workspace] members = [ "bin/node-template/node", - "bin/node-template/runtime", "bin/node-template/pallets/template", + "bin/node-template/runtime", "bin/node/bench", "bin/node/browser-testing", "bin/node/cli", "bin/node/executor", "bin/node/primitives", - "bin/node/rpc-client", "bin/node/rpc", + "bin/node/rpc-client", "bin/node/runtime", "bin/node/testing", - "bin/utils/subkey", "bin/utils/chain-spec-builder", + "bin/utils/subkey", "client/api", "client/authority-discovery", 
"client/basic-authorship", @@ -21,61 +21,59 @@ members = [ "client/chain-spec", "client/chain-spec/derive", "client/cli", - "client/cli/proc-macro", "client/consensus/aura", "client/consensus/babe", "client/consensus/babe/rpc", "client/consensus/common", + "client/consensus/epochs", "client/consensus/manual-seal", "client/consensus/pow", - "client/consensus/uncles", "client/consensus/slots", - "client/consensus/epochs", + "client/consensus/uncles", "client/db", "client/executor", "client/executor/common", + "client/executor/runtime-test", "client/executor/wasmi", "client/executor/wasmtime", - "client/executor/runtime-test", "client/finality-grandpa", + "client/finality-grandpa-warp-sync", "client/informant", - "client/light", - "client/tracing", "client/keystore", + "client/light", "client/network", - "client/network/test", "client/network-gossip", + "client/network/test", "client/offchain", "client/peerset", "client/proposer-metrics", - "client/rpc-servers", "client/rpc", "client/rpc-api", + "client/rpc-servers", "client/service", "client/service/test", "client/state-db", "client/sync-state-rpc", "client/telemetry", + "client/tracing", + "client/tracing/proc-macro", "client/transaction-pool", "client/transaction-pool/graph", - "utils/prometheus", - "utils/wasm-builder-runner", "frame/assets", - "frame/aura", "frame/atomic-swap", + "frame/aura", "frame/authority-discovery", "frame/authorship", "frame/babe", "frame/balances", "frame/benchmarking", + "frame/bounties", "frame/collective", "frame/contracts", "frame/contracts/rpc", "frame/contracts/rpc/runtime-api", "frame/democracy", - "frame/elections-phragmen", "frame/elections", - "frame/evm", "frame/example", "frame/example-offchain-worker", "frame/example-parallel", @@ -84,7 +82,10 @@ members = [ "frame/identity", "frame/im-online", "frame/indices", + "frame/lottery", "frame/membership", + "frame/merkle-mountain-range", + "frame/merkle-mountain-range/primitives", "frame/metadata", "frame/multisig", "frame/nicks", @@ -99,8 +100,8 @@ members = [ "frame/session/benchmarking", "frame/society", "frame/staking", - "frame/staking/reward-curve", "frame/staking/fuzzer", + "frame/staking/reward-curve", "frame/sudo", "frame/support", "frame/support/procedural", @@ -115,62 +116,64 @@ members = [ "frame/transaction-payment/rpc", "frame/transaction-payment/rpc/runtime-api", "frame/treasury", + "frame/tips", "frame/utility", "frame/vesting", "primitives/allocator", + "primitives/api", + "primitives/api/proc-macro", + "primitives/api/test", "primitives/application-crypto", "primitives/application-crypto/test", + "primitives/arithmetic", + "primitives/arithmetic/fuzzer", "primitives/authority-discovery", "primitives/authorship", "primitives/block-builder", "primitives/blockchain", + "primitives/chain-spec", "primitives/consensus/aura", "primitives/consensus/babe", "primitives/consensus/common", "primitives/consensus/pow", "primitives/consensus/vrf", "primitives/core", - "primitives/chain-spec", "primitives/database", "primitives/debug-derive", - "primitives/storage", "primitives/externalities", + "primitives/election-providers", "primitives/finality-grandpa", "primitives/inherents", + "primitives/io", "primitives/keyring", "primitives/keystore", - "primitives/offchain", - "primitives/panic-handler", "primitives/npos-elections", - "primitives/npos-elections/fuzzer", "primitives/npos-elections/compact", + "primitives/npos-elections/fuzzer", + "primitives/offchain", + "primitives/panic-handler", "primitives/rpc", + "primitives/runtime", 
"primitives/runtime-interface", "primitives/runtime-interface/proc-macro", + "primitives/runtime-interface/test", "primitives/runtime-interface/test-wasm", "primitives/runtime-interface/test-wasm-deprecated", - "primitives/runtime-interface/test", + "primitives/sandbox", "primitives/serializer", "primitives/session", - "primitives/api", - "primitives/api/proc-macro", - "primitives/api/test", - "primitives/arithmetic", - "primitives/arithmetic/fuzzer", - "primitives/io", - "primitives/runtime", - "primitives/sandbox", "primitives/staking", - "primitives/std", - "primitives/version", "primitives/state-machine", + "primitives/std", + "primitives/storage", "primitives/tasks", - "primitives/timestamp", "primitives/test-primitives", - "primitives/transaction-pool", + "primitives/timestamp", "primitives/tracing", + "primitives/transaction-pool", "primitives/trie", "primitives/utils", + "primitives/version", "primitives/wasm-interface", "test-utils/client", "test-utils/derive", @@ -185,6 +188,7 @@ members = [ "utils/frame/frame-utilities-cli", "utils/frame/rpc/support", "utils/frame/rpc/system", + "utils/prometheus", "utils/wasm-builder", ] @@ -209,7 +213,6 @@ aesni = { opt-level = 3 } blake2 = { opt-level = 3 } blake2-rfc = { opt-level = 3 } blake2b_simd = { opt-level = 3 } -blake2s_simd = { opt-level = 3 } chacha20poly1305 = { opt-level = 3 } cranelift-codegen = { opt-level = 3 } cranelift-wasm = { opt-level = 3 } @@ -219,8 +222,6 @@ crossbeam-queue = { opt-level = 3 } crypto-mac = { opt-level = 3 } curve25519-dalek = { opt-level = 3 } ed25519-dalek = { opt-level = 3 } -evm-core = { opt-level = 3 } -evm-runtime = { opt-level = 3 } flate2 = { opt-level = 3 } futures-channel = { opt-level = 3 } hashbrown = { opt-level = 3 } diff --git a/HEADER b/HEADER-APACHE2 similarity index 92% rename from HEADER rename to HEADER-APACHE2 index c9b28a07b0f22..f364f4bdf845a 100644 --- a/HEADER +++ b/HEADER-APACHE2 @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/license_header.txt b/HEADER-GPL3 similarity index 50% rename from docs/license_header.txt rename to HEADER-GPL3 index f9c1daa1ad1c1..0dd7e4f76028f 100644 --- a/docs/license_header.txt +++ b/HEADER-GPL3 @@ -1,15 +1,17 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . 
diff --git a/README.md b/README.md index c586919a1ddc3..94de8533be266 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Substrate · [![GitHub license](https://img.shields.io/badge/license-GPL3%2FApache2-blue)](LICENSE) [![GitLab Status](https://gitlab.parity.io/parity/substrate/badges/master/pipeline.svg)](https://gitlab.parity.io/parity/substrate/pipelines) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](docs/CONTRIBUTING.adoc) +# Substrate · [![GitHub license](https://img.shields.io/badge/license-GPL3%2FApache2-blue)](#LICENSE) [![GitLab Status](https://gitlab.parity.io/parity/substrate/badges/master/pipeline.svg)](https://gitlab.parity.io/parity/substrate/pipelines) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](docs/CONTRIBUTING.adoc)

diff --git a/bin/node-template/README.md b/bin/node-template/README.md index 5623fedb5342b..8c8b82a14bb86 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -55,7 +55,7 @@ RUST_LOG=debug RUST_BACKTRACE=1 ./target/release/node-template -lruntime=debug - ### Multi-Node Local Testnet To see the multi-node consensus algorithm in action, run a local testnet with two validator nodes, -Alice and Bob, that have been [configured](/bin/node-template/node/src/chain_spec.rs) as the initial +Alice and Bob, that have been [configured](./node/src/chain_spec.rs) as the initial authorities of the `local` testnet chain and endowed with testnet units. Note: this will require two terminal sessions (one for each node). @@ -157,7 +157,7 @@ Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this the following: - This file configures several pallets to include in the runtime. Each pallet configuration is - defined by a code block that begins with `impl $PALLET_NAME::Trait for Runtime`. + defined by a code block that begins with `impl $PALLET_NAME::Config for Runtime`. - The pallets are composed into a single runtime by way of the [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) macro, which is part of the core @@ -181,8 +181,8 @@ A FRAME pallet is compromised of a number of blockchain primitives: - Events: Substrate uses [events](https://substrate.dev/docs/en/knowledgebase/runtime/events) to notify users of important changes in the runtime. - Errors: When a dispatchable fails, it returns an error. -- Trait: The `Trait` configuration interface is used to define the types and parameters upon which - a FRAME pallet depends. +- Config: The `Config` configuration interface is used to define the types and parameters upon + which a FRAME pallet depends. 
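To make the `Trait` -> `Config` rename above concrete: a runtime now configures a pallet with a block of the form `impl pallet_template::Config for Runtime { type Event = Event; }`, mirroring the `impl pallet_template::Config for Test` block in the template pallet's mock runtime later in this diff. The snippet below is a minimal, self-contained sketch of that pattern outside Substrate; the names in it (`pallet_template`, `Runtime`, `RuntimeEvent`, `deposit_event`) are illustrative stand-ins, not the real APIs.

```rust
// Illustrative sketch only, not Substrate itself: a pallet declares a `Config`
// trait with the types it depends on, and the runtime supplies them via
// `impl <pallet>::Config for Runtime { .. }`.
mod pallet_template {
    /// The pallet's configuration interface (formerly called `Trait`).
    pub trait Config {
        /// The runtime decides what an "event" is for this pallet.
        type Event: core::fmt::Debug;
    }

    /// A stand-in for the pallet depositing an event into the runtime.
    pub fn deposit_event<T: Config>(event: T::Event) {
        println!("deposited: {:?}", event);
    }
}

/// A stand-in for the runtime's aggregated event type.
#[derive(Debug)]
enum RuntimeEvent {
    SomethingStored(u32, u64),
}

/// A stand-in for the runtime normally assembled by `construct_runtime!`.
struct Runtime;

// The configuration block the README refers to:
// "a code block that begins with `impl $PALLET_NAME::Config for Runtime`".
impl pallet_template::Config for Runtime {
    type Event = RuntimeEvent;
}

fn main() {
    pallet_template::deposit_event::<Runtime>(RuntimeEvent::SomethingStored(42, 1));
}
```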
## Generate a Custom Node Template diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 8b1a47fd2bf15..464b07cb98f0b 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -18,41 +18,43 @@ name = "node-template" [dependencies] structopt = "0.3.8" -sc-cli = { version = "0.8.0", path = "../../../client/cli", features = ["wasmtime"] } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-executor = { version = "0.8.0", path = "../../../client/executor", features = ["wasmtime"] } -sc-service = { version = "0.8.0", path = "../../../client/service", features = ["wasmtime"] } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -sc-consensus-aura = { version = "0.8.0", path = "../../../client/consensus/aura" } -sp-consensus-aura = { version = "0.8.0", path = "../../../primitives/consensus/aura" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } -sc-finality-grandpa = { version = "0.8.0", path = "../../../client/finality-grandpa" } -sp-finality-grandpa = { version = "2.0.0", path = "../../../primitives/finality-grandpa" } -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sc-cli = { version = "0.9.0", path = "../../../client/cli", features = ["wasmtime"] } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sc-executor = { version = "0.9.0", path = "../../../client/executor", features = ["wasmtime"] } +sc-service = { version = "0.9.0", path = "../../../client/service", features = ["wasmtime"] } +sc-telemetry = { version = "3.0.0", path = "../../../client/telemetry" } +sc-keystore = { version = "3.0.0", path = "../../../client/keystore" } +sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } +sc-transaction-pool = { version = "3.0.0", path = "../../../client/transaction-pool" } +sp-transaction-pool = { version = "3.0.0", path = "../../../primitives/transaction-pool" } +sc-consensus-aura = { version = "0.9.0", path = "../../../client/consensus/aura" } +sp-consensus-aura = { version = "0.9.0", path = "../../../primitives/consensus/aura" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.9.0", path = "../../../client/consensus/common" } +sc-finality-grandpa = { version = "0.9.0", path = "../../../client/finality-grandpa" } +sp-finality-grandpa = { version = "3.0.0", path = "../../../primitives/finality-grandpa" } +sc-client-api = { version = "3.0.0", path = "../../../client/api" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } # These dependencies are used for the node template's RPCs -jsonrpc-core = "15.0.0" -sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sc-rpc-api = { version = "0.8.0", path = "../../../client/rpc-api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sc-basic-authorship = { version = "0.8.0", path = "../../../client/basic-authorship" } 
-substrate-frame-rpc-system = { version = "2.0.0", path = "../../../utils/frame/rpc/system" } -pallet-transaction-payment-rpc = { version = "2.0.0", path = "../../../frame/transaction-payment/rpc/" } +jsonrpc-core = "15.1.0" +sc-rpc = { version = "3.0.0", path = "../../../client/rpc" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sc-rpc-api = { version = "0.9.0", path = "../../../client/rpc-api" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-block-builder = { version = "3.0.0", path = "../../../primitives/block-builder" } +sc-basic-authorship = { version = "0.9.0", path = "../../../client/basic-authorship" } +substrate-frame-rpc-system = { version = "3.0.0", path = "../../../utils/frame/rpc/system" } +pallet-transaction-payment-rpc = { version = "3.0.0", path = "../../../frame/transaction-payment/rpc/" } # These dependencies are used for runtime benchmarking -frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" } -frame-benchmarking-cli = { version = "2.0.0", path = "../../../utils/frame/benchmarking-cli" } +frame-benchmarking = { version = "3.0.0", path = "../../../frame/benchmarking" } +frame-benchmarking-cli = { version = "3.0.0", path = "../../../utils/frame/benchmarking-cli" } node-template-runtime = { version = "2.0.0", path = "../runtime" } [build-dependencies] -substrate-build-script-utils = { version = "2.0.0", path = "../../../utils/build-script-utils" } +substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build-script-utils" } [features] default = [] diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index 41f582fb64a46..c5451e81f20c1 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -39,7 +39,7 @@ pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { } pub fn development_config() -> Result { - let wasm_binary = WASM_BINARY.ok_or("Development wasm binary not available".to_string())?; + let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; Ok(ChainSpec::from_genesis( // Name @@ -78,7 +78,7 @@ pub fn development_config() -> Result { } pub fn local_testnet_config() -> Result { - let wasm_binary = WASM_BINARY.ok_or("Development wasm binary not available".to_string())?; + let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; Ok(ChainSpec::from_genesis( // Name diff --git a/bin/node-template/node/src/cli.rs b/bin/node-template/node/src/cli.rs index f2faf17e4ddf4..947123a6bbf5b 100644 --- a/bin/node-template/node/src/cli.rs +++ b/bin/node-template/node/src/cli.rs @@ -12,6 +12,8 @@ pub struct Cli { #[derive(Debug, StructOpt)] pub enum Subcommand { + /// Key management cli utilities + Key(sc_cli::KeySubcommand), /// Build a chain specification. BuildSpec(sc_cli::BuildSpecCmd), diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index ac950b50483ac..e61dd86418825 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -66,6 +66,7 @@ pub fn run() -> sc_cli::Result<()> { let cli = Cli::from_args(); match &cli.subcommand { + Some(Subcommand::Key(cmd)) => cmd.run(&cli), Some(Subcommand::BuildSpec(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) @@ -130,7 +131,7 @@ pub fn run() -> sc_cli::Result<()> { match config.role { Role::Light => service::new_light(config), _ => service::new_full(config), - } + }.map_err(sc_cli::Error::Service) }) } } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 90187061c9cf7..4061dce438892 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -9,7 +9,8 @@ use sp_inherents::InherentDataProviders; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; -use sc_finality_grandpa::{FinalityProofProvider as GrandpaFinalityProofProvider, SharedVoterState}; +use sc_finality_grandpa::SharedVoterState; +use sc_keystore::LocalKeystore; // Our native executor instance. native_executor_instance!( @@ -34,9 +35,13 @@ pub fn new_partial(config: &Configuration) -> Result, AuraPair >, - sc_finality_grandpa::LinkHalf + sc_finality_grandpa::LinkHalf, ) >, ServiceError> { + if config.keystore_remote.is_some() { + return Err(ServiceError::Other( + format!("Remote Keystores are not supported."))) + } let inherent_data_providers = sp_inherents::InherentDataProviders::new(); let (client, backend, keystore_container, task_manager) = @@ -47,6 +52,7 @@ pub fn new_partial(config: &Configuration) -> Result Result Result Result, &'static str> { + // FIXME: here would the concrete keystore be built, + // must return a concrete type (NOT `LocalKeystore`) that + // implements `CryptoStore` and `SyncCryptoStore` + Err("Remote Keystore not supported.") +} + /// Builds a new service for a full client. 
-pub fn new_full(config: Configuration) -> Result { +pub fn new_full(mut config: Configuration) -> Result { let sc_service::PartialComponents { - client, backend, mut task_manager, import_queue, keystore_container, - select_chain, transaction_pool, inherent_data_providers, + client, + backend, + mut task_manager, + import_queue, + mut keystore_container, + select_chain, + transaction_pool, + inherent_data_providers, other: (block_import, grandpa_link), } = new_partial(&config)?; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + if let Some(url) = &config.keystore_remote { + match remote_keystore(url) { + Ok(k) => keystore_container.set_remote_keystore(k), + Err(e) => { + return Err(ServiceError::Other( + format!("Error hooking up remote keystore for {}: {}", url, e))) + } + }; + } + + config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { @@ -99,8 +132,6 @@ pub fn new_full(config: Configuration) -> Result { import_queue, on_demand: None, block_announce_validator_builder: None, - finality_proof_request_builder: None, - finality_proof_provider: Some(finality_proof_provider.clone()), })?; if config.offchain_worker.enabled { @@ -111,10 +142,10 @@ pub fn new_full(config: Configuration) -> Result { let role = config.role.clone(); let force_authoring = config.force_authoring; + let backoff_authoring_blocks: Option<()> = None; let name = config.network.node_name.clone(); let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); - let telemetry_connection_sinks = sc_service::TelemetryConnectionSinks::default(); let rpc_extensions_builder = { let client = client.clone(); @@ -131,18 +162,22 @@ pub fn new_full(config: Configuration) -> Result { }) }; - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - network: network.clone(), - client: client.clone(), - keystore: keystore_container.sync_keystore(), - task_manager: &mut task_manager, - transaction_pool: transaction_pool.clone(), - telemetry_connection_sinks: telemetry_connection_sinks.clone(), - rpc_extensions_builder, - on_demand: None, - remote_blockchain: None, - backend, network_status_sinks, system_rpc_tx, config, - })?; + let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks( + sc_service::SpawnTasksParams { + network: network.clone(), + client: client.clone(), + keystore: keystore_container.sync_keystore(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + rpc_extensions_builder, + on_demand: None, + remote_blockchain: None, + backend, + network_status_sinks, + system_rpc_tx, + config, + }, + )?; if role.is_authority() { let proposer = sc_basic_authorship::ProposerFactory::new( @@ -155,7 +190,7 @@ pub fn new_full(config: Configuration) -> Result { let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _>( + let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _,_>( sc_consensus_aura::slot_duration(&*client)?, client.clone(), select_chain, @@ -164,6 +199,7 @@ pub fn new_full(config: Configuration) -> Result { network.clone(), inherent_data_providers.clone(), force_authoring, + backoff_authoring_blocks, keystore_container.sync_keystore(), can_author_with, )?; @@ -202,7 +238,7 @@ 
pub fn new_full(config: Configuration) -> Result { config: grandpa_config, link: grandpa_link, network, - telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()), + telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()), voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state: SharedVoterState::empty(), @@ -214,8 +250,6 @@ pub fn new_full(config: Configuration) -> Result { "grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)? ); - } else { - sc_finality_grandpa::setup_disabled_grandpa(network)?; } network_starter.start_network(); @@ -223,10 +257,14 @@ pub fn new_full(config: Configuration) -> Result { } /// Builds a new service for a light client. -pub fn new_light(config: Configuration) -> Result { +pub fn new_light(mut config: Configuration) -> Result { let (client, backend, keystore_container, mut task_manager, on_demand) = sc_service::new_light_parts::(&config)?; + config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( config.transaction_pool.clone(), config.prometheus_registry(), @@ -235,19 +273,21 @@ pub fn new_light(config: Configuration) -> Result { on_demand.clone(), )); - let grandpa_block_import = sc_finality_grandpa::light_block_import( - client.clone(), backend.clone(), &(client.clone() as Arc<_>), - Arc::new(on_demand.checker().clone()) as Arc<_>, + let (grandpa_block_import, _) = sc_finality_grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), )?; - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); + + let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( + grandpa_block_import.clone(), + client.clone(), + ); let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( sc_consensus_aura::slot_duration(&*client)?, - grandpa_block_import, - None, - Some(Box::new(finality_proof_import)), + aura_block_import, + Some(Box::new(grandpa_block_import)), client.clone(), InherentDataProviders::new(), &task_manager.spawn_handle(), @@ -255,9 +295,6 @@ pub fn new_light(config: Configuration) -> Result { sp_consensus::NeverCanAuthor, )?; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); - let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, @@ -267,8 +304,6 @@ pub fn new_light(config: Configuration) -> Result { import_queue, on_demand: Some(on_demand.clone()), block_announce_validator_builder: None, - finality_proof_request_builder: Some(finality_proof_request_builder), - finality_proof_provider: Some(finality_proof_provider), })?; if config.offchain_worker.enabled { @@ -283,7 +318,6 @@ pub fn new_light(config: Configuration) -> Result { task_manager: &mut task_manager, on_demand: Some(on_demand), rpc_extensions_builder: Box::new(|_, _| ()), - telemetry_connection_sinks: sc_service::TelemetryConnectionSinks::default(), config, client, keystore: keystore_container.sync_keystore(), @@ -291,9 +325,9 @@ pub fn new_light(config: Configuration) -> Result { network, network_status_sinks, system_rpc_tx, - })?; + })?; - 
network_starter.start_network(); + network_starter.start_network(); - Ok(task_manager) + Ok(task_manager) } diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index 12b810de186f4..a13d05082b016 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -13,31 +13,34 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } [dependencies.frame-support] default-features = false -version = "2.0.0" +version = "3.0.0" path = "../../../../frame/support" [dependencies.frame-system] default-features = false -version = "2.0.0" +version = "3.0.0" path = "../../../../frame/system" +[dev-dependencies] +serde = { version = "1.0.101" } + [dev-dependencies.sp-core] default-features = false -version = "2.0.0" +version = "3.0.0" path = "../../../../primitives/core" [dev-dependencies.sp-io] default-features = false -version = "2.0.0" +version = "3.0.0" path = "../../../../primitives/io" [dev-dependencies.sp-runtime] default-features = false -version = "2.0.0" +version = "3.0.0" path = "../../../../primitives/runtime" diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 729a71278aa9f..52d9e8111d139 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -4,8 +4,7 @@ /// Learn more about FRAME and the core library of Substrate FRAME pallets: /// https://substrate.dev/docs/en/knowledgebase/runtime/frame -use frame_support::{decl_module, decl_storage, decl_event, decl_error, dispatch, traits::Get}; -use frame_system::ensure_signed; +pub use pallet::*; #[cfg(test)] mod mock; @@ -13,89 +12,91 @@ mod mock; #[cfg(test)] mod tests; -/// Configure the pallet by specifying the parameters and types on which it depends. -pub trait Trait: frame_system::Trait { - /// Because this pallet emits events, it depends on the runtime's definition of an event. - type Event: From> + Into<::Event>; -} +#[frame_support::pallet] +pub mod pallet { + use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*}; + use frame_system::pallet_prelude::*; -// The pallet's runtime storage items. -// https://substrate.dev/docs/en/knowledgebase/runtime/storage -decl_storage! { - // A unique name is used to ensure that the pallet's storage items are isolated. - // This name may be updated, but each pallet in the runtime must use a unique name. - // ---------------------------------vvvvvvvvvvvvvv - trait Store for Module as TemplateModule { - // Learn more about declaring storage items: - // https://substrate.dev/docs/en/knowledgebase/runtime/storage#declaring-storage-items - Something get(fn something): Option; + /// Configure the pallet by specifying the parameters and types on which it depends. + #[pallet::config] + pub trait Config: frame_system::Config { + /// Because this pallet emits events, it depends on the runtime's definition of an event. + type Event: From> + IsType<::Event>; } -} -// Pallets use events to inform users when important changes are made. 
-// https://substrate.dev/docs/en/knowledgebase/runtime/events -decl_event!( - pub enum Event where AccountId = ::AccountId { + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + // The pallet's runtime storage items. + // https://substrate.dev/docs/en/knowledgebase/runtime/storage + #[pallet::storage] + #[pallet::getter(fn something)] + // Learn more about declaring storage items: + // https://substrate.dev/docs/en/knowledgebase/runtime/storage#declaring-storage-items + pub type Something = StorageValue<_, u32>; + + // Pallets use events to inform users when important changes are made. + // https://substrate.dev/docs/en/knowledgebase/runtime/events + #[pallet::event] + #[pallet::metadata(T::AccountId = "AccountId")] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { /// Event documentation should end with an array that provides descriptive names for event /// parameters. [something, who] - SomethingStored(u32, AccountId), + SomethingStored(u32, T::AccountId), } -); - -// Errors inform users that something went wrong. -decl_error! { - pub enum Error for Module { + + // Errors inform users that something went wrong. + #[pallet::error] + pub enum Error { /// Error names should be descriptive. NoneValue, /// Errors should have helpful documentation associated with them. StorageOverflow, } -} - -// Dispatchable functions allows users to interact with the pallet and invoke state changes. -// These functions materialize as "extrinsics", which are often compared to transactions. -// Dispatchable functions must be annotated with a weight and must return a DispatchResult. -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - // Errors must be initialized if they are used by the pallet. - type Error = Error; - // Events must be initialized if they are used by the pallet. - fn deposit_event() = default; + #[pallet::hooks] + impl Hooks> for Pallet {} + // Dispatchable functions allows users to interact with the pallet and invoke state changes. + // These functions materialize as "extrinsics", which are often compared to transactions. + // Dispatchable functions must be annotated with a weight and must return a DispatchResult. + #[pallet::call] + impl Pallet { /// An example dispatchable that takes a singles value as a parameter, writes the value to /// storage and emits an event. This function must be dispatched by a signed extrinsic. - #[weight = 10_000 + T::DbWeight::get().writes(1)] - pub fn do_something(origin, something: u32) -> dispatch::DispatchResult { + #[pallet::weight(10_000 + T::DbWeight::get().writes(1))] + pub fn do_something(origin: OriginFor, something: u32) -> DispatchResultWithPostInfo { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. // https://substrate.dev/docs/en/knowledgebase/runtime/origin let who = ensure_signed(origin)?; // Update storage. - Something::put(something); + >::put(something); // Emit an event. - Self::deposit_event(RawEvent::SomethingStored(something, who)); - // Return a successful DispatchResult - Ok(()) + Self::deposit_event(Event::SomethingStored(something, who)); + // Return a successful DispatchResultWithPostInfo + Ok(().into()) } /// An example dispatchable that may throw a custom error. 
- #[weight = 10_000 + T::DbWeight::get().reads_writes(1,1)] - pub fn cause_error(origin) -> dispatch::DispatchResult { + #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1))] + pub fn cause_error(origin: OriginFor) -> DispatchResultWithPostInfo { let _who = ensure_signed(origin)?; // Read a value from storage. - match Something::get() { + match >::get() { // Return an error if the value has not been set. None => Err(Error::::NoneValue)?, Some(old) => { // Increment the value read from storage; will error in the event of overflow. let new = old.checked_add(1).ok_or(Error::::StorageOverflow)?; // Update the value in storage with the incremented result. - Something::put(new); - Ok(()) + >::put(new); + Ok(().into()) }, } } diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index a3dff240e4847..d33670f2e9cb0 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -1,30 +1,38 @@ -use crate::{Module, Trait}; +use crate as pallet_template; use sp_core::H256; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; +use frame_support::parameter_types; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, Perbill, + traits::{BlakeTwo256, IdentityLookup}, testing::Header, }; use frame_system as system; -impl_outer_origin! { - pub enum Origin for Test {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; // Configure a mock runtime to test the pallet. +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + TemplateModule: pallet_template::{Module, Call, Storage, Event}, + } +); -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub const SS58Prefix: u8 = 42; } -impl system::Trait for Test { +impl system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; - type Call = (); + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -32,29 +40,21 @@ impl system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; } -impl Trait for Test { - type Event = (); +impl pallet_template::Config for Test { + type Event = Event; } -pub type TemplateModule = Module; - // Build genesis storage according to the mock runtime. 
pub fn new_test_ext() -> sp_io::TestExternalities { system::GenesisConfig::default().build_storage::().unwrap().into() diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index ed5a114b813f8..dd907f55fbbbe 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -11,44 +11,44 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -pallet-aura = { version = "2.0.0", default-features = false, path = "../../../frame/aura" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../../../frame/balances" } -frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } -pallet-grandpa = { version = "2.0.0", default-features = false, path = "../../../frame/grandpa" } -pallet-randomness-collective-flip = { version = "2.0.0", default-features = false, path = "../../../frame/randomness-collective-flip" } -pallet-sudo = { version = "2.0.0", default-features = false, path = "../../../frame/sudo" } -frame-system = { version = "2.0.0", default-features = false, path = "../../../frame/system" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment" } -frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" } +pallet-aura = { version = "3.0.0", default-features = false, path = "../../../frame/aura" } +pallet-balances = { version = "3.0.0", default-features = false, path = "../../../frame/balances" } +frame-support = { version = "3.0.0", default-features = false, path = "../../../frame/support" } +pallet-grandpa = { version = "3.0.0", default-features = false, path = "../../../frame/grandpa" } +pallet-randomness-collective-flip = { version = "3.0.0", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-sudo = { version = "3.0.0", default-features = false, path = "../../../frame/sudo" } +frame-system = { version = "3.0.0", default-features = false, path = "../../../frame/system" } +pallet-timestamp = { version = "3.0.0", default-features = false, path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-payment" } +frame-executive = { version = "3.0.0", default-features = false, path = "../../../frame/executive" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0", default-features = false, path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0"} -sp-consensus-aura = { version = "0.8.0", default-features = false, path = "../../../primitives/consensus/aura" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "2.0.0"} -sp-offchain = { version = "2.0.0", default-features = false, path = "../../../primitives/offchain" } -sp-runtime = { version = "2.0.0", default-features = false, path = 
"../../../primitives/runtime" } -sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0", default-features = false, path = "../../../primitives/version" } +sp-api = { version = "3.0.0", default-features = false, path = "../../../primitives/api" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "3.0.0"} +sp-consensus-aura = { version = "0.9.0", default-features = false, path = "../../../primitives/consensus/aura" } +sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } +sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "3.0.0"} +sp-offchain = { version = "3.0.0", default-features = false, path = "../../../primitives/offchain" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "3.0.0", default-features = false, path = "../../../primitives/session" } +sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } +sp-transaction-pool = { version = "3.0.0", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "3.0.0", default-features = false, path = "../../../primitives/version" } # Used for the node template's RPCs -frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } +frame-system-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-transaction-payment-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } # Used for runtime benchmarking -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/benchmarking", optional = true } -frame-system-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-system-benchmarking = { version = "3.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } hex-literal = { version = "0.3.1", optional = true } template = { version = "2.0.0", default-features = false, path = "../pallets/template", package = "pallet-template" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "4.0.0", path = "../../../utils/wasm-builder" } [features] default = ["std"] diff --git a/bin/node-template/runtime/build.rs b/bin/node-template/runtime/build.rs index 9654139121f6f..9b53d2457dffd 100644 --- a/bin/node-template/runtime/build.rs +++ b/bin/node-template/runtime/build.rs @@ -1,9 +1,8 @@ -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - 
.with_wasm_builder_from_crates("2.0.1") .export_heap_base() .import_memory() .build() diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 5028301978cd4..8d68dbdc96868 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -13,7 +13,7 @@ use sp_runtime::{ transaction_validity::{TransactionValidity, TransactionSource}, }; use sp_runtime::traits::{ - BlakeTwo256, Block as BlockT, IdentityLookup, Verify, IdentifyAccount, NumberFor, Saturating, + BlakeTwo256, Block as BlockT, AccountIdLookup, Verify, IdentifyAccount, NumberFor, }; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -102,6 +102,12 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { transaction_version: 1, }; +/// This determines the average expected block time that we are targetting. +/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. +/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked +/// up by `pallet_aura` to implement `fn slot_duration()`. +/// +/// Change this to adjust the block time. pub const MILLISECS_PER_BLOCK: u64 = 6000; pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; @@ -120,29 +126,34 @@ pub fn native_version() -> NativeVersion { } } +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + parameter_types! { + pub const Version: RuntimeVersion = VERSION; pub const BlockHashCount: BlockNumber = 2400; /// We allow for 2 seconds of compute with a 6 second average block time. - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - /// Assume 10% of weight for average on_initialize calls. - pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get() - .saturating_sub(Perbill::from_percent(10)) * MaximumBlockWeight::get(); - pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; - pub const Version: RuntimeVersion = VERSION; + pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights + ::with_sensible_defaults(2 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength + ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub const SS58Prefix: u8 = 42; } // Configure FRAME pallets to include in runtime. -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { /// The basic call filter to use in dispatchable. type BaseCallFilter = (); + /// Block & extrinsics weights: base values and limits. + type BlockWeights = BlockWeights; + /// The maximum length of a block (in bytes). + type BlockLength = BlockLength; /// The identifier used to distinguish between accounts. type AccountId = AccountId; /// The aggregated dispatch type that is available for extrinsics. type Call = Call; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = IdentityLookup; + type Lookup = AccountIdLookup; /// The index type for storing how many extrinsics an account has signed. type Index = Index; /// The index type for blocks. @@ -159,24 +170,8 @@ impl frame_system::Trait for Runtime { type Origin = Origin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; - /// Maximum weight of each block. - type MaximumBlockWeight = MaximumBlockWeight; /// The weight of database operations that the runtime can invoke. 
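As an aside (not part of the change set): the new doc comment on `MILLISECS_PER_BLOCK` describes a chain of constants that lives in this same file; a sketch of the relationship it spells out, assuming the 6000 ms target above:

// Sketch only: how the block-time target propagates, per the new doc comment.
pub const MILLISECS_PER_BLOCK: u64 = 6000;
pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
// pallet_timestamp is configured further down in this diff with
//     pub const MinimumPeriod: u64 = SLOT_DURATION / 2; // = 3000 ms
// and, as the comment says, pallet_aura derives `fn slot_duration()` from
// pallet_timestamp, so MILLISECS_PER_BLOCK is the single knob for block time.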
type DbWeight = RocksDbWeight; - /// The weight of the overhead invoked on the block import process, independent of the - /// extrinsics included in that block. - type BlockExecutionWeight = BlockExecutionWeight; - /// The base weight of any extrinsic processed by the runtime, independent of the - /// logic of that extrinsic. (Signature verification, nonce increment, fee, etc...) - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - /// The maximum weight that a single extrinsic of `Normal` dispatch class can have, - /// idependent of the logic of that extrinsics. (Roughly max block weight - average on - /// initialize cost). - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - /// Maximum size of all encoded transactions (in bytes) that are allowed in one block. - type MaximumBlockLength = MaximumBlockLength; - /// Portion of the block weight that is available to all normal transactions. - type AvailableBlockRatio = AvailableBlockRatio; /// Version of the runtime. type Version = Version; /// Converts a module to the index of the module in `construct_runtime!`. @@ -191,13 +186,15 @@ impl frame_system::Trait for Runtime { type AccountData = pallet_balances::AccountData; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); + /// This is used as an identifier of the chain. 42 is the generic substrate prefix. + type SS58Prefix = SS58Prefix; } -impl pallet_aura::Trait for Runtime { +impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; } -impl pallet_grandpa::Trait for Runtime { +impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -220,7 +217,7 @@ parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { /// A timestamp: milliseconds since the unix epoch. type Moment = u64; type OnTimestampSet = Aura; @@ -233,7 +230,7 @@ parameter_types! { pub const MaxLocks: u32 = 50; } -impl pallet_balances::Trait for Runtime { +impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; /// The type for recording an account's balance. type Balance = Balance; @@ -249,20 +246,20 @@ parameter_types! { pub const TransactionByteFee: Balance = 1; } -impl pallet_transaction_payment::Trait for Runtime { +impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } -impl pallet_sudo::Trait for Runtime { +impl pallet_sudo::Config for Runtime { type Event = Event; type Call = Call; } /// Configure the pallet template in pallets/template. -impl template::Trait for Runtime { +impl template::Config for Runtime { type Event = Event; } @@ -276,7 +273,7 @@ construct_runtime!( System: frame_system::{Module, Call, Config, Storage, Event}, RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage}, Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, - Aura: pallet_aura::{Module, Config, Inherent}, + Aura: pallet_aura::{Module, Config}, Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event}, Balances: pallet_balances::{Module, Call, Storage, Config, Event}, TransactionPayment: pallet_transaction_payment::{Module, Storage}, @@ -287,7 +284,7 @@ construct_runtime!( ); /// The address format for describing accounts. -pub type Address = AccountId; +pub type Address = sp_runtime::MultiAddress; /// Block header type as expected by this runtime. 
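As an aside (not part of the change set): switching `Address` from a bare `AccountId` to `sp_runtime::MultiAddress` means anything that constructs an `UncheckedExtrinsic` for this runtime must now wrap the target account id explicitly; a minimal sketch, assuming the runtime's exported `AccountId` and `Address` aliases:

// Sketch only: building the new Address type for a transaction.
use node_template_runtime::{AccountId, Address};
use sp_runtime::MultiAddress;

fn to_address(who: AccountId) -> Address {
	// After this change, Address = MultiAddress<AccountId, ()>.
	MultiAddress::Id(who)
}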
pub type Header = generic::Header; /// Block type as expected by this runtime. @@ -441,6 +438,12 @@ impl_runtime_apis! { ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { TransactionPayment::query_info(uxt, len) } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } } #[cfg(feature = "runtime-benchmarks")] @@ -451,7 +454,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; use frame_system_benchmarking::Module as SystemBench; - impl frame_system_benchmarking::Trait for Runtime {} + impl frame_system_benchmarking::Config for Runtime {} let whitelist: Vec = vec![ // Block Number diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 88362f7e51022..728eb8d6093ce 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -13,31 +13,31 @@ log = "0.4.8" node-primitives = { version = "2.0.0", path = "../primitives" } node-testing = { version = "2.0.0", path = "../testing" } node-runtime = { version = "2.0.0", path = "../runtime" } -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0", path = "../../../client/api/" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } +sc-cli = { version = "0.9.0", path = "../../../client/cli" } +sc-client-api = { version = "3.0.0", path = "../../../client/api/" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.9.0", path = "../../../primitives/state-machine" } serde = "1.0.101" serde_json = "1.0.41" structopt = "0.3" derive_more = "0.99.2" -kvdb = "0.7" -kvdb-rocksdb = "0.9.1" -sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -sc-basic-authorship = { version = "0.8.0", path = "../../../client/basic-authorship" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../../primitives/timestamp" } -sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } +kvdb = "0.9.0" +kvdb-rocksdb = "0.11.0" +sp-trie = { version = "3.0.0", path = "../../../primitives/trie" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sp-transaction-pool = { version = "3.0.0", path = "../../../primitives/transaction-pool" } +sc-basic-authorship = { version = "0.9.0", path = "../../../client/basic-authorship" } +sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } +sp-timestamp = { version = "3.0.0", default-features = false, path = "../../../primitives/timestamp" } +sp-tracing = { version = "3.0.0", path = "../../../primitives/tracing" } hash-db = "0.15.2" tempfile = "3.1.0" fs_extra = "1" hex = "0.4.0" rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -parity-db = { version = "0.1.2" } -sc-transaction-pool = { version = "2.0.0", path = 
"../../../client/transaction-pool" } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } +parity-db = { version = "0.2.2" } +sc-transaction-pool = { version = "3.0.0", path = "../../../client/transaction-pool" } futures = { version = "0.3.4", features = ["thread-pool"] } diff --git a/bin/node/bench/src/common.rs b/bin/node/bench/src/common.rs index 2637d6e9bd04d..d04d79e9907af 100644 --- a/bin/node/bench/src/common.rs +++ b/bin/node/bench/src/common.rs @@ -1,7 +1,6 @@ - // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -45,4 +44,4 @@ impl SizeType { SizeType::Custom(val) => Some(*val), } } -} \ No newline at end of file +} diff --git a/bin/node/bench/src/construct.rs b/bin/node/bench/src/construct.rs index 5506dc426de0b..a8a02f19c306e 100644 --- a/bin/node/bench/src/construct.rs +++ b/bin/node/bench/src/construct.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/bench/src/core.rs b/bin/node/bench/src/core.rs index 6faa7b72721f4..26b7f92b14483 100644 --- a/bin/node/bench/src/core.rs +++ b/bin/node/bench/src/core.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index 759a4299c7275..c540ae147c9f0 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index ae28a20089e10..b4fee58dac025 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs index 46b659dd88387..40e9e1577777e 100644 --- a/bin/node/bench/src/main.rs +++ b/bin/node/bench/src/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs index 3cfd7ddb300a9..a29b51a38af58 100644 --- a/bin/node/bench/src/simple_trie.rs +++ b/bin/node/bench/src/simple_trie.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/bench/src/state_sizes.rs b/bin/node/bench/src/state_sizes.rs index d35989f61be34..f9288c1054898 100644 --- a/bin/node/bench/src/state_sizes.rs +++ b/bin/node/bench/src/state_sizes.rs @@ -1,18 +1,20 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity. +// This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Parity is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// along with this program. If not, see . /// Kusama value size distribution pub const KUSAMA_STATE_DISTRIBUTION: &'static[(u32, u32)] = &[ @@ -4753,4 +4755,4 @@ pub const KUSAMA_STATE_DISTRIBUTION: &'static[(u32, u32)] = &[ (1516670, 1), (1605731, 1), (1605821, 1), -]; \ No newline at end of file +]; diff --git a/bin/node/bench/src/tempdb.rs b/bin/node/bench/src/tempdb.rs index 4020fd1029368..31ef71fba7b5e 100644 --- a/bin/node/bench/src/tempdb.rs +++ b/bin/node/bench/src/tempdb.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{io, sync::Arc}; +use std::{io, path::PathBuf, sync::Arc}; use kvdb::{KeyValueDB, DBTransaction}; use kvdb_rocksdb::{DatabaseConfig, Database}; @@ -124,7 +124,7 @@ impl Clone for TempDatabase { .map(|f_result| f_result.expect("failed to read file in seed db") .path() - ).collect(); + ).collect::>(); fs_extra::copy_items( &self_db_files, new_dir.path(), diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index eb6c574e27170..a3e7620473d98 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/bench/src/txpool.rs b/bin/node/bench/src/txpool.rs index 7ea13fc15ec68..b3646a92e032a 100644 --- a/bin/node/bench/src/txpool.rs +++ b/bin/node/bench/src/txpool.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -74,6 +74,7 @@ impl core::Benchmark for PoolBenchmark { let executor = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), + true.into(), None, executor, context.client.clone(), diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index c90c4a293f49a..66e7b398dd16f 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -8,14 +8,14 @@ license = "Apache-2.0" [dependencies] futures-timer = "3.0.2" -libp2p = { version = "0.29.1", default-features = false } +libp2p = { version = "0.34.0", default-features = false } jsonrpc-core = "15.0.0" serde = "1.0.106" serde_json = "1.0.48" -wasm-bindgen = { version = "=0.2.67", features = ["serde-serialize"] } -wasm-bindgen-futures = "0.4.10" -wasm-bindgen-test = "0.3.10" -futures = "0.3.4" +wasm-bindgen = { version = "=0.2.69", features = ["serde-serialize"] } +wasm-bindgen-futures = "0.4.18" +wasm-bindgen-test = "0.3.18" +futures = "0.3.9" node-cli = { path = "../cli", default-features = false, features = ["browser"] , version = "2.0.0"} -sc-rpc-api = { path = "../../../client/rpc-api" , version = "0.8.0"} +sc-rpc-api = { path = "../../../client/rpc-api" , version = "0.9.0"} diff --git a/bin/node/browser-testing/src/lib.rs b/bin/node/browser-testing/src/lib.rs index 777e5ea9f132e..ad18de87b3d3e 100644 --- a/bin/node/browser-testing/src/lib.rs +++ b/bin/node/browser-testing/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,7 +26,7 @@ //! ``` //! For debug infomation, such as the informant, run without the `--headless` //! flag and open a browser to the url that `wasm-pack test` outputs. -//! For more infomation see https://rustwasm.github.io/docs/wasm-pack/. +//! For more infomation see . 
use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; use wasm_bindgen_futures::JsFuture; diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index e396b2dcefff5..99d6f5216d212 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -34,59 +34,60 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.3.4" } +codec = { package = "parity-scale-codec", version = "2.0.0" } serde = { version = "1.0.102", features = ["derive"] } -futures = { version = "0.3.1", features = ["compat"] } +futures = { version = "0.3.9", features = ["compat"] } hex-literal = "0.3.1" log = "0.4.8" rand = "0.7.2" structopt = { version = "0.3.8", optional = true } -tracing = "0.1.19" -parking_lot = "0.10.0" +parking_lot = "0.11.1" # primitives -sp-authority-discovery = { version = "2.0.0", path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } -grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../../primitives/timestamp" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +sp-authority-discovery = { version = "3.0.0", path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.9.0", path = "../../../primitives/consensus/babe" } +grandpa-primitives = { version = "3.0.0", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-timestamp = { version = "3.0.0", default-features = false, path = "../../../primitives/timestamp" } +sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } +sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } +sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } +sp-io = { version = "3.0.0", path = "../../../primitives/io" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sp-transaction-pool = { version = "3.0.0", path = "../../../primitives/transaction-pool" } # client dependencies -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-chain-spec = { version = "2.0.0", path = "../../../client/chain-spec" } -sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } -sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } -sc-network = { version = "0.8.0", path = "../../../client/network" } -sc-consensus-babe = { version = "0.8.0", path = "../../../client/consensus/babe" } -grandpa = { version = "0.8.0", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -sc-client-db = { version = "0.8.0", default-features = 
false, path = "../../../client/db" } -sc-offchain = { version = "2.0.0", path = "../../../client/offchain" } -sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } -sc-basic-authorship = { version = "0.8.0", path = "../../../client/basic-authorship" } -sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } -sc-tracing = { version = "2.0.0", path = "../../../client/tracing" } -sc-telemetry = { version = "2.0.0", path = "../../../client/telemetry" } -sc-authority-discovery = { version = "0.8.0", path = "../../../client/authority-discovery" } +sc-client-api = { version = "3.0.0", path = "../../../client/api" } +sc-chain-spec = { version = "3.0.0", path = "../../../client/chain-spec" } +sc-consensus = { version = "0.9.0", path = "../../../client/consensus/common" } +sc-transaction-pool = { version = "3.0.0", path = "../../../client/transaction-pool" } +sc-network = { version = "0.9.0", path = "../../../client/network" } +sc-consensus-slots = { version = "0.9.0", path = "../../../client/consensus/slots" } +sc-consensus-babe = { version = "0.9.0", path = "../../../client/consensus/babe" } +grandpa = { version = "0.9.0", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +sc-client-db = { version = "0.9.0", default-features = false, path = "../../../client/db" } +sc-offchain = { version = "3.0.0", path = "../../../client/offchain" } +sc-rpc = { version = "3.0.0", path = "../../../client/rpc" } +sc-basic-authorship = { version = "0.9.0", path = "../../../client/basic-authorship" } +sc-service = { version = "0.9.0", default-features = false, path = "../../../client/service" } +sc-tracing = { version = "3.0.0", path = "../../../client/tracing" } +sc-telemetry = { version = "3.0.0", path = "../../../client/telemetry" } +sc-authority-discovery = { version = "0.9.0", path = "../../../client/authority-discovery" } +sc-finality-grandpa-warp-sync = { version = "0.8.0", path = "../../../client/finality-grandpa-warp-sync", optional = true } # frame dependencies -pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } +pallet-indices = { version = "3.0.0", path = "../../../frame/indices" } +pallet-timestamp = { version = "3.0.0", default-features = false, path = "../../../frame/timestamp" } pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } -frame-system = { version = "2.0.0", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } -frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } -pallet-im-online = { version = "2.0.0", default-features = false, path = "../../../frame/im-online" } -pallet-authority-discovery = { version = "2.0.0", path = "../../../frame/authority-discovery" } -pallet-staking = { version = "2.0.0", path = "../../../frame/staking" } -pallet-grandpa = { version = "2.0.0", path = "../../../frame/grandpa" } +frame-system = { version = "3.0.0", path = "../../../frame/system" } +pallet-balances = { version = "3.0.0", path = "../../../frame/balances" } +pallet-transaction-payment = { version = "3.0.0", path = "../../../frame/transaction-payment" } +frame-support = { version = "3.0.0", default-features = false, path = "../../../frame/support" } +pallet-im-online = { version = "3.0.0", 
default-features = false, path = "../../../frame/im-online" } +pallet-authority-discovery = { version = "3.0.0", path = "../../../frame/authority-discovery" } +pallet-staking = { version = "3.0.0", path = "../../../frame/staking" } +pallet-grandpa = { version = "3.0.0", path = "../../../frame/grandpa" } # node-specific dependencies node-runtime = { version = "2.0.0", path = "../runtime" } @@ -95,44 +96,44 @@ node-primitives = { version = "2.0.0", path = "../primitives" } node-executor = { version = "2.0.0", path = "../executor" } # CLI-specific dependencies -sc-cli = { version = "0.8.0", optional = true, path = "../../../client/cli" } -frame-benchmarking-cli = { version = "2.0.0", optional = true, path = "../../../utils/frame/benchmarking-cli" } +sc-cli = { version = "0.9.0", optional = true, path = "../../../client/cli" } +frame-benchmarking-cli = { version = "3.0.0", optional = true, path = "../../../utils/frame/benchmarking-cli" } node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } # WASM-specific dependencies wasm-bindgen = { version = "0.2.57", optional = true } -wasm-bindgen-futures = { version = "0.4.7", optional = true } -browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0"} +wasm-bindgen-futures = { version = "0.4.18", optional = true } +browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.9.0"} [target.'cfg(target_arch="x86_64")'.dependencies] node-executor = { version = "2.0.0", path = "../executor", features = [ "wasmtime" ] } -sc-cli = { version = "0.8.0", optional = true, path = "../../../client/cli", features = [ "wasmtime" ] } -sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service", features = [ "wasmtime" ] } -sp-trie = { version = "2.0.0", default-features = false, path = "../../../primitives/trie", features = ["memory-tracker"] } +sc-cli = { version = "0.9.0", optional = true, path = "../../../client/cli", features = [ "wasmtime" ] } +sc-service = { version = "0.9.0", default-features = false, path = "../../../client/service", features = [ "wasmtime" ] } +sp-trie = { version = "3.0.0", default-features = false, path = "../../../primitives/trie", features = ["memory-tracker"] } [dev-dependencies] -sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } -sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } -sc-consensus-babe = { version = "0.8.0", features = ["test-helpers"], path = "../../../client/consensus/babe" } -sc-consensus-epochs = { version = "0.8.0", path = "../../../client/consensus/epochs" } +sc-keystore = { version = "3.0.0", path = "../../../client/keystore" } +sc-consensus = { version = "0.9.0", path = "../../../client/consensus/common" } +sc-consensus-babe = { version = "0.9.0", features = ["test-helpers"], path = "../../../client/consensus/babe" } +sc-consensus-epochs = { version = "0.9.0", path = "../../../client/consensus/epochs" } sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } -futures = "0.3.4" +futures = "0.3.9" tempfile = "3.1.0" assert_cmd = "1.0" -nix = "0.17" +nix = "0.19" serde_json = "1.0" regex = "1" -platforms = "0.2.1" +platforms = "1.1" [build-dependencies] structopt = { version = "0.3.8", optional = true } node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } -frame-benchmarking-cli = { version = "2.0.0", optional = true, path = 
"../../../utils/frame/benchmarking-cli" } -substrate-build-script-utils = { version = "2.0.0", optional = true, path = "../../../utils/build-script-utils" } -substrate-frame-cli = { version = "2.0.0", optional = true, path = "../../../utils/frame/frame-utilities-cli" } +frame-benchmarking-cli = { version = "3.0.0", optional = true, path = "../../../utils/frame/benchmarking-cli" } +substrate-build-script-utils = { version = "3.0.0", optional = true, path = "../../../utils/build-script-utils" } +substrate-frame-cli = { version = "3.0.0", optional = true, path = "../../../utils/frame/frame-utilities-cli" } [build-dependencies.sc-cli] -version = "0.8.0" +version = "0.9.0" package = "sc-cli" path = "../../../client/cli" optional = true @@ -151,6 +152,7 @@ cli = [ "frame-benchmarking-cli", "substrate-frame-cli", "sc-service/db", + "sc-finality-grandpa-warp-sync", "structopt", "substrate-build-script-utils", ] diff --git a/bin/node/cli/bin/main.rs b/bin/node/cli/bin/main.rs index 299b760c82e36..cf32a7cf28860 100644 --- a/bin/node/cli/bin/main.rs +++ b/bin/node/cli/bin/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/browser-demo/build.sh b/bin/node/cli/browser-demo/build.sh index be52b7a523f01..8840106daeb5c 100755 --- a/bin/node/cli/browser-demo/build.sh +++ b/bin/node/cli/browser-demo/build.sh @@ -1,4 +1,5 @@ #!/usr/bin/env sh +set -e -x cargo +nightly build --release -p node-cli --target wasm32-unknown-unknown --no-default-features --features browser -Z features=itarget wasm-bindgen ../../../../target/wasm32-unknown-unknown/release/node_cli.wasm --out-dir pkg --target web python -m http.server 8000 diff --git a/bin/node/cli/browser-demo/index.html b/bin/node/cli/browser-demo/index.html index 60acfde39f559..4a706906ab108 100644 --- a/bin/node/cli/browser-demo/index.html +++ b/bin/node/cli/browser-demo/index.html @@ -27,8 +27,8 @@ setInterval(() => { client - .rpcSend('{"method":"system_networkState","params":[],"id":1,"jsonrpc":"2.0"}') - .then((r) => log("Network state: " + r)); + .rpcSend('{"method":"system_localPeerId","params":[],"id":1,"jsonrpc":"2.0"}') + .then((r) => log("Local PeerId: " + r)); }, 20000); } diff --git a/bin/node/cli/build.rs b/bin/node/cli/build.rs index a36f0d01a0a03..befcdaea6d9cf 100644 --- a/bin/node/cli/build.rs +++ b/bin/node/cli/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/src/browser.rs b/bin/node/cli/src/browser.rs index 41770f5fcde6d..6c0a2f10d95e5 100644 --- a/bin/node/cli/src/browser.rs +++ b/bin/node/cli/src/browser.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,9 +21,8 @@ use log::info; use wasm_bindgen::prelude::*; use browser_utils::{ Client, - browser_configuration, set_console_error_panic_hook, init_console_log, + browser_configuration, init_logging_and_telemetry, set_console_error_panic_hook, }; -use std::str::FromStr; /// Starts the client. #[wasm_bindgen] @@ -33,20 +32,27 @@ pub async fn start_client(chain_spec: Option, log_level: String) -> Resu .map_err(|err| JsValue::from_str(&err.to_string())) } -async fn start_inner(chain_spec: Option, log_level: String) -> Result> { +async fn start_inner( + chain_spec: Option, + log_directives: String, +) -> Result> { set_console_error_panic_hook(); - init_console_log(log::Level::from_str(&log_level)?)?; + let telemetry_worker = init_logging_and_telemetry(&log_directives)?; let chain_spec = match chain_spec { Some(chain_spec) => ChainSpec::from_json_bytes(chain_spec.as_bytes().to_vec()) .map_err(|e| format!("{:?}", e))?, None => crate::chain_spec::development_config(), }; - let config = browser_configuration(chain_spec).await?; + let telemetry_handle = telemetry_worker.handle(); + let config = browser_configuration( + chain_spec, + Some(telemetry_handle), + ).await?; info!("Substrate browser node"); info!("✌️ version {}", config.impl_version); - info!("❤️ by Parity Technologies, 2017-2020"); + info!("❤️ by Parity Technologies, 2017-2021"); info!("📋 Chain specification: {}", config.chain_spec.name()); info!("🏷 Node name: {}", config.network.node_name); info!("👤 Role: {:?}", config.role); @@ -54,8 +60,10 @@ async fn start_inner(chain_spec: Option, log_level: String) -> Result>, enable_println: bool, ) -> GenesisConfig { - let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(|| { + let mut endowed_accounts: Vec = endowed_accounts.unwrap_or_else(|| { vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), @@ -234,10 +234,16 @@ pub fn testnet_genesis( get_account_id_from_seed::("Ferdie//stash"), ] }); + initial_authorities.iter().for_each(|x| + if !endowed_accounts.contains(&x.0) { + endowed_accounts.push(x.0.clone()) + } + ); + let num_endowed_accounts = endowed_accounts.len(); const ENDOWMENT: Balance = 10_000_000 * DOLLARS; - const STASH: Balance = 100 * DOLLARS; + const STASH: Balance = ENDOWMENT / 1000; GenesisConfig { frame_system: Some(SystemConfig { @@ -246,9 +252,8 @@ pub fn testnet_genesis( }), pallet_balances: Some(BalancesConfig { balances: endowed_accounts.iter().cloned() - .map(|k| (k, ENDOWMENT)) - .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) - .collect(), + .map(|x| (x, ENDOWMENT)) + .collect() }), pallet_indices: Some(IndicesConfig { indices: vec![], @@ -436,7 +441,7 @@ pub(crate) mod tests { Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool)) }, |config| { - let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; + let (keep_alive, _, _, client, network, transaction_pool) = new_light_base(config)?; Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) } ); diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index 2130ff1e4b106..63a07e00e2197 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
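As an aside (not part of the change set): with `ENDOWMENT = 10_000_000 * DOLLARS`, the new `STASH = ENDOWMENT / 1000` in `testnet_genesis` works out to `10_000 * DOLLARS`, a hundredfold increase over the previous `100 * DOLLARS` bond:

// Sketch only: the stash bond implied by the new genesis constants.
const ENDOWMENT: Balance = 10_000_000 * DOLLARS;
const STASH: Balance = ENDOWMENT / 1000; // = 10_000 * DOLLARS (previously 100 * DOLLARS)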
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index f8a0f3f9b3a34..461930a613d97 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -49,15 +49,18 @@ impl SubstrateCli for Cli { } fn load_spec(&self, id: &str) -> std::result::Result, String> { - Ok(match id { - "dev" => Box::new(chain_spec::development_config()), - "local" => Box::new(chain_spec::local_testnet_config()), - "" | "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?), - "staging" => Box::new(chain_spec::staging_testnet_config()), - path => Box::new(chain_spec::ChainSpec::from_json_file( - std::path::PathBuf::from(path), - )?), - }) + let spec = + match id { + "" => return Err("Please specify which chain you want to run, e.g. --dev or --chain=local".into()), + "dev" => Box::new(chain_spec::development_config()), + "local" => Box::new(chain_spec::local_testnet_config()), + "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?), + "staging" => Box::new(chain_spec::staging_testnet_config()), + path => Box::new(chain_spec::ChainSpec::from_json_file( + std::path::PathBuf::from(path), + )?), + }; + Ok(spec) } fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { @@ -76,7 +79,7 @@ pub fn run() -> Result<()> { match config.role { Role::Light => service::new_light(config), _ => service::new_full(config), - } + }.map_err(sc_cli::Error::Service) }) } Some(Subcommand::Inspect(cmd)) => { @@ -94,7 +97,7 @@ pub fn run() -> Result<()> { You can enable it with `--features runtime-benchmarks`.".into()) } } - Some(Subcommand::Key(cmd)) => cmd.run(), + Some(Subcommand::Key(cmd)) => cmd.run(&cli), Some(Subcommand::Sign(cmd)) => cmd.run(), Some(Subcommand::Verify(cmd)) => cmd.run(), Some(Subcommand::Vanity(cmd)) => cmd.run(), diff --git a/bin/node/cli/src/lib.rs b/bin/node/cli/src/lib.rs index bd2298514a7a2..d29836c7499f3 100644 --- a/bin/node/cli/src/lib.rs +++ b/bin/node/cli/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index ecf50dc14634b..dcce31bd32257 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,7 +22,6 @@ use std::sync::Arc; use sc_consensus_babe; -use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use node_primitives::Block; use node_runtime::RuntimeApi; use sc_service::{ @@ -35,6 +34,7 @@ use sp_runtime::traits::Block as BlockT; use futures::prelude::*; use sc_client_api::{ExecutorProvider, RemoteBackend}; use node_executor::Executor; +use sc_telemetry::TelemetryConnectionNotifier; type FullClient = sc_service::TFullClient; type FullBackend = sc_service::TFullBackend; @@ -57,10 +57,7 @@ pub fn new_partial(config: &Configuration) -> Result, sc_consensus_babe::BabeLink, ), - ( - grandpa::SharedVoterState, - Arc>, - ), + grandpa::SharedVoterState, ) >, ServiceError> { let (client, backend, keystore_container, task_manager) = @@ -71,6 +68,7 @@ pub fn new_partial(config: &Configuration) -> Result Result Result Result, &sc_consensus_babe::BabeLink, ) ) -> Result { let sc_service::PartialComponents { - client, backend, mut task_manager, import_queue, keystore_container, - select_chain, transaction_pool, inherent_data_providers, + client, + backend, + mut task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + inherent_data_providers, other: (rpc_extensions_builder, import_setup, rpc_setup), } = new_partial(&config)?; - let (shared_voter_state, finality_proof_provider) = rpc_setup; + let shared_voter_state = rpc_setup; + + config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); + + #[cfg(feature = "cli")] + config.network.request_response_protocols.push(sc_finality_grandpa_warp_sync::request_response_config_for_chain( + &config, task_manager.spawn_handle(), backend.clone(), + )); let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { @@ -192,8 +210,6 @@ pub fn new_full_base( import_queue, on_demand: None, block_announce_validator_builder: None, - finality_proof_request_builder: None, - finality_proof_provider: Some(finality_proof_provider.clone()), })?; if config.offchain_worker.enabled { @@ -204,26 +220,28 @@ pub fn new_full_base( let role = config.role.clone(); let force_authoring = config.force_authoring; + let backoff_authoring_blocks = + Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default()); let name = config.network.node_name.clone(); let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); - let telemetry_connection_sinks = sc_service::TelemetryConnectionSinks::default(); - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - config, - backend: backend.clone(), - client: client.clone(), - keystore: keystore_container.sync_keystore(), - network: network.clone(), - rpc_extensions_builder: Box::new(rpc_extensions_builder), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - on_demand: None, - remote_blockchain: None, - telemetry_connection_sinks: telemetry_connection_sinks.clone(), - network_status_sinks: network_status_sinks.clone(), - system_rpc_tx, - })?; + let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks( + sc_service::SpawnTasksParams { + config, + backend: backend.clone(), + client: client.clone(), + keystore: keystore_container.sync_keystore(), + network: network.clone(), + rpc_extensions_builder: Box::new(rpc_extensions_builder), + 
transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + on_demand: None, + remote_blockchain: None, + network_status_sinks: network_status_sinks.clone(), + system_rpc_tx, + }, + )?; let (block_import, grandpa_link, babe_link) = import_setup; @@ -249,6 +267,7 @@ pub fn new_full_base( sync_oracle: network.clone(), inherent_data_providers: inherent_data_providers.clone(), force_authoring, + backoff_authoring_blocks, babe_link, can_author_with, }; @@ -307,7 +326,7 @@ pub fn new_full_base( config, link: grandpa_link, network: network.clone(), - telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()), + telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()), voting_rule: grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state, @@ -319,8 +338,6 @@ pub fn new_full_base( "grandpa-voter", grandpa::run_grandpa_voter(grandpa_config)? ); - } else { - grandpa::setup_disabled_grandpa(network.clone())?; } network_starter.start_network(); @@ -342,14 +359,16 @@ pub fn new_full(config: Configuration) }) } -pub fn new_light_base(config: Configuration) -> Result<( - TaskManager, RpcHandlers, Arc, +pub fn new_light_base(mut config: Configuration) -> Result<( + TaskManager, RpcHandlers, Option, Arc, Arc::Hash>>, Arc>> ), ServiceError> { let (client, backend, keystore_container, mut task_manager, on_demand) = sc_service::new_light_parts::(&config)?; + config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); + let select_chain = sc_consensus::LongestChain::new(backend.clone()); let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( @@ -360,14 +379,12 @@ pub fn new_light_base(config: Configuration) -> Result<( on_demand.clone(), )); - let grandpa_block_import = grandpa::light_block_import( - client.clone(), backend.clone(), &(client.clone() as Arc<_>), - Arc::new(on_demand.checker().clone()), + let (grandpa_block_import, _) = grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), )?; - - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); + let justification_import = grandpa_block_import.clone(); let (babe_block_import, babe_link) = sc_consensus_babe::block_import( sc_consensus_babe::Config::get_or_compute(&*client)?, @@ -380,8 +397,7 @@ pub fn new_light_base(config: Configuration) -> Result<( let import_queue = sc_consensus_babe::import_queue( babe_link, babe_block_import, - None, - Some(Box::new(finality_proof_import)), + Some(Box::new(justification_import)), client.clone(), select_chain.clone(), inherent_data_providers.clone(), @@ -390,9 +406,6 @@ pub fn new_light_base(config: Configuration) -> Result<( sp_consensus::NeverCanAuthor, )?; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); - let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, @@ -402,8 +415,6 @@ pub fn new_light_base(config: Configuration) -> Result<( import_queue, on_demand: Some(on_demand.clone()), block_announce_validator_builder: None, - finality_proof_request_builder: Some(finality_proof_request_builder), - finality_proof_provider: Some(finality_proof_provider), })?; network_starter.start_network(); @@ -422,7 +433,7 @@ pub fn new_light_base(config: Configuration) -> Result<( let rpc_extensions = 
node_rpc::create_light(light_deps); - let rpc_handlers = + let (rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(sc_service::SpawnTasksParams { on_demand: Some(on_demand), remote_blockchain: Some(backend.remote_blockchain()), @@ -432,16 +443,22 @@ pub fn new_light_base(config: Configuration) -> Result<( keystore: keystore_container.sync_keystore(), config, backend, network_status_sinks, system_rpc_tx, network: network.clone(), - telemetry_connection_sinks: sc_service::TelemetryConnectionSinks::default(), task_manager: &mut task_manager, })?; - Ok((task_manager, rpc_handlers, client, network, transaction_pool)) + Ok(( + task_manager, + rpc_handlers, + telemetry_connection_notifier, + client, + network, + transaction_pool, + )) } /// Builds a new service for a light client. pub fn new_light(config: Configuration) -> Result { - new_light_base(config).map(|(task_manager, _, _, _, _)| { + new_light_base(config).map(|(task_manager, _, _, _, _, _)| { task_manager }) } @@ -495,7 +512,7 @@ mod tests { let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority(); // For the block factory - let mut slot_num = 1u64; + let mut slot = 1u64; // For the extrinsics factory let bob = Arc::new(AccountKeyring::Bob.pair()); @@ -523,7 +540,7 @@ mod tests { Ok((node, (inherent_data_providers, setup_handles.unwrap()))) }, |config| { - let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; + let (keep_alive, _, _, client, network, transaction_pool) = new_light_base(config)?; Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) }, |service, &mut (ref inherent_data_providers, (ref mut block_import, ref babe_link))| { @@ -556,7 +573,7 @@ mod tests { descendent_query(&*service.client()), &parent_hash, parent_number, - slot_num, + slot.into(), ).unwrap().unwrap(); let mut digest = Digest::::default(); @@ -564,9 +581,9 @@ mod tests { // even though there's only one authority some slots might be empty, // so we must keep trying the next slots until we can claim one. let babe_pre_digest = loop { - inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); + inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot * SLOT_DURATION)); if let Some(babe_pre_digest) = sc_consensus_babe::test_helpers::claim_slot( - slot_num, + slot.into(), &parent_header, &*service.client(), keystore.clone(), @@ -575,7 +592,7 @@ mod tests { break babe_pre_digest; } - slot_num += 1; + slot += 1; }; digest.push(::babe_pre_digest(babe_pre_digest)); @@ -606,7 +623,7 @@ mod tests { let item = ::babe_seal( signature, ); - slot_num += 1; + slot += 1; let mut params = BlockImportParams::new(BlockOrigin::File, new_header); params.post_digests.push(item); @@ -681,7 +698,7 @@ mod tests { Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool)) }, |config| { - let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; + let (keep_alive, _, _, client, network, transaction_pool) = new_light_base(config)?; Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) }, vec![ diff --git a/bin/node/cli/tests/build_spec_works.rs b/bin/node/cli/tests/build_spec_works.rs index 800a4a8c51e61..6d863ea7f949d 100644 --- a/bin/node/cli/tests/build_spec_works.rs +++ b/bin/node/cli/tests/build_spec_works.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/tests/check_block_works.rs b/bin/node/cli/tests/check_block_works.rs index 34078b08cf074..39963fb002876 100644 --- a/bin/node/cli/tests/check_block_works.rs +++ b/bin/node/cli/tests/check_block_works.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 61a07dd1ca877..c3bb96555da56 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/tests/export_import_flow.rs b/bin/node/cli/tests/export_import_flow.rs index 557e722ddb7b5..02fba49e834ef 100644 --- a/bin/node/cli/tests/export_import_flow.rs +++ b/bin/node/cli/tests/export_import_flow.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs index aa9653acadba5..67dbc97056cf6 100644 --- a/bin/node/cli/tests/inspect_works.rs +++ b/bin/node/cli/tests/inspect_works.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/tests/purge_chain_works.rs b/bin/node/cli/tests/purge_chain_works.rs index 001bed8b136f5..4c0727d26cb17 100644 --- a/bin/node/cli/tests/purge_chain_works.rs +++ b/bin/node/cli/tests/purge_chain_works.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs index bd79dcd77a49a..05eb9a7027b71 100644 --- a/bin/node/cli/tests/running_the_node_and_interrupt.rs +++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/tests/temp_base_path_works.rs b/bin/node/cli/tests/temp_base_path_works.rs index 9351568d87955..0152ddb464dc7 100644 --- a/bin/node/cli/tests/temp_base_path_works.rs +++ b/bin/node/cli/tests/temp_base_path_works.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/tests/version.rs b/bin/node/cli/tests/version.rs index bbc9139d4f0f8..38e4b1fbda72e 100644 --- a/bin/node/cli/tests/version.rs +++ b/bin/node/cli/tests/version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index f7bef798e4d02..b67c29889d30e 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -12,35 +12,35 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } +codec = { package = "parity-scale-codec", version = "2.0.0" } node-primitives = { version = "2.0.0", path = "../primitives" } node-runtime = { version = "2.0.0", path = "../runtime" } -sc-executor = { version = "0.8.0", path = "../../../client/executor" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } +sc-executor = { version = "0.9.0", path = "../../../client/executor" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } +sp-io = { version = "3.0.0", path = "../../../primitives/io" } +sp-state-machine = { version = "0.9.0", path = "../../../primitives/state-machine" } +sp-trie = { version = "3.0.0", path = "../../../primitives/trie" } trie-root = "0.16.0" -frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" } +frame-benchmarking = { version = "3.0.0", path = "../../../frame/benchmarking" } [dev-dependencies] criterion = "0.3.0" -frame-support = { version = "2.0.0", path = "../../../frame/support" } -frame-system = { version = "2.0.0", path = "../../../frame/system" } +frame-support = { version = "3.0.0", path = "../../../frame/support" } +frame-system = { version = "3.0.0", path = "../../../frame/system" } node-testing = { version = "2.0.0", path = "../testing" } -pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } +pallet-balances = { version = "3.0.0", path = "../../../frame/balances" } pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0", path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0", path = 
"../../../frame/im-online" } -pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } -pallet-session = { version = "2.0.0", path = "../../../frame/session" } -pallet-timestamp = { version = "2.0.0", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0", path = "../../../frame/treasury" } -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-externalities = { version = "0.8.0", path = "../../../primitives/externalities" } +pallet-grandpa = { version = "3.0.0", path = "../../../frame/grandpa" } +pallet-im-online = { version = "3.0.0", path = "../../../frame/im-online" } +pallet-indices = { version = "3.0.0", path = "../../../frame/indices" } +pallet-session = { version = "3.0.0", path = "../../../frame/session" } +pallet-timestamp = { version = "3.0.0", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "3.0.0", path = "../../../frame/transaction-payment" } +pallet-treasury = { version = "3.0.0", path = "../../../frame/treasury" } +sp-application-crypto = { version = "3.0.0", path = "../../../primitives/application-crypto" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-externalities = { version = "0.9.0", path = "../../../primitives/externalities" } substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } wat = "1.0" diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index 168cff0ff4568..554e6c4af428d 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bin/node/executor/src/lib.rs b/bin/node/executor/src/lib.rs index 4c3b82bc7d3b5..e7fb09a19c514 100644 --- a/bin/node/executor/src/lib.rs +++ b/bin/node/executor/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 723e3a7e4ba62..d27954d3a721a 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ use codec::{Encode, Decode, Joiner}; use frame_support::{ - StorageValue, StorageMap, + StorageMap, traits::Currency, weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, }; @@ -27,7 +27,6 @@ use sp_runtime::{ traits::Hash as HashT, transaction_validity::InvalidTransaction, }; -use pallet_contracts::ContractAddressFor; use frame_system::{self, EventRecord, Phase}; use node_runtime::{ @@ -193,7 +192,7 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { let mut t = new_test_ext(compact_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 69u128, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0u32, 69u128, 0u128, 0u128, 0u128).encode() ); t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -222,11 +221,11 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { let mut t = new_test_ext(compact_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( >::hashed_key_for(bob()), - (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( >::hashed_key().to_vec(), @@ -265,11 +264,11 @@ fn successful_execution_with_foreign_code_gives_ok() { let mut t = new_test_ext(bloaty_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( >::hashed_key_for(bob()), - (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( >::hashed_key().to_vec(), @@ -330,14 +329,14 @@ fn full_native_block_import_works() { let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( + event: Event::frame_system(frame_system::Event::ExtrinsicSuccess( DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, ..Default::default() } )), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_balances(pallet_balances::RawEvent::Transfer( + event: Event::pallet_balances(pallet_balances::Event::Transfer( alice().into(), bob().into(), 69 * DOLLARS, @@ -351,7 +350,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( + event: Event::frame_system(frame_system::Event::ExtrinsicSuccess( DispatchInfo { weight: transfer_weight, ..Default::default() } )), topics: vec![], @@ -382,7 +381,7 @@ fn full_native_block_import_works() { let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( + event: Event::frame_system(frame_system::Event::ExtrinsicSuccess( DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, ..Default::default() } )), topics: vec![], @@ -390,7 +389,7 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(1), event: Event::pallet_balances( - pallet_balances::RawEvent::Transfer( + pallet_balances::Event::Transfer( bob().into(), alice().into(), 5 * DOLLARS, @@ -405,7 +404,7 @@ fn full_native_block_import_works() { }, 
EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( + event: Event::frame_system(frame_system::Event::ExtrinsicSuccess( DispatchInfo { weight: transfer_weight, ..Default::default() } )), topics: vec![], @@ -413,7 +412,7 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(2), event: Event::pallet_balances( - pallet_balances::RawEvent::Transfer( + pallet_balances::Event::Transfer( alice().into(), bob().into(), 15 * DOLLARS, @@ -428,7 +427,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( + event: Event::frame_system(frame_system::Event::ExtrinsicSuccess( DispatchInfo { weight: transfer_weight, ..Default::default() } )), topics: vec![], @@ -581,15 +580,15 @@ const CODE_TRANSFER: &str = r#" #[test] fn deploying_wasm_contract_should_work() { let transfer_code = wat::parse_str(CODE_TRANSFER).unwrap(); - let transfer_ch = ::Hashing::hash(&transfer_code); + let transfer_ch = ::Hashing::hash(&transfer_code); - let addr = ::DetermineContractAddress::contract_address_for( + let addr = pallet_contracts::Module::::contract_address( + &charlie(), &transfer_ch, &[], - &charlie(), ); - let subsistence = pallet_contracts::Config::::subsistence_threshold_uncached(); + let subsistence = pallet_contracts::Module::::subsistence_threshold(); let b = construct_block( &mut new_test_ext(compact_code_unwrap(), false), @@ -603,25 +602,20 @@ fn deploying_wasm_contract_should_work() { CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), function: Call::Contracts( - pallet_contracts::Call::put_code::(transfer_code) - ), - }, - CheckedExtrinsic { - signed: Some((charlie(), signed_extra(1, 0))), - function: Call::Contracts( - pallet_contracts::Call::instantiate::( - 1 * DOLLARS + subsistence, + pallet_contracts::Call::instantiate_with_code::( + 1000 * DOLLARS + subsistence, 500_000_000, - transfer_ch, - Vec::new() + transfer_code, + Vec::new(), + Vec::new(), ) ), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(2, 0))), + signed: Some((charlie(), signed_extra(1, 0))), function: Call::Contracts( pallet_contracts::Call::call::( - pallet_indices::address::Address::Id(addr.clone()), + sp_runtime::MultiAddress::Id(addr.clone()), 10, 500_000_000, vec![0x00, 0x01, 0x02, 0x03] @@ -702,7 +696,7 @@ fn panic_execution_gives_error() { let mut t = new_test_ext(bloaty_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert(>::hashed_key().to_vec(), 0_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -731,11 +725,11 @@ fn successful_execution_gives_ok() { let mut t = new_test_ext(compact_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( >::hashed_key_for(bob()), - (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( >::hashed_key().to_vec(), diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index efc54ebebf199..b376ebc35bae8 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
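The contract-deployment test above collapses the old put_code + instantiate pair into a single instantiate_with_code extrinsic, with the address computed up front via pallet_contracts::Module::<Runtime>::contract_address(&charlie(), &transfer_ch, &[]). A fragment of the call as built in the test; the argument labels are annotations added here, not part of the diff:

// Fragment of the CheckedExtrinsic from the test above; argument labels are
// annotations based on the pallet-contracts call used in this diff.
function: Call::Contracts(
    pallet_contracts::Call::instantiate_with_code::<Runtime>(
        1000 * DOLLARS + subsistence, // endowment transferred to the new contract
        500_000_000,                  // gas limit
        transfer_code,                // Wasm code, uploaded and instantiated in one step
        Vec::new(),                   // constructor input data
        Vec::new(),                   // salt used for address derivation
    )
),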
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index d04af1d827009..2e92077c4ada3 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,6 @@ use codec::{Encode, Joiner}; use frame_support::{ - StorageValue, StorageMap, traits::Currency, weights::{GetDispatchInfo, constants::ExtrinsicBaseWeight, IdentityFee, WeightToFeePolynomial}, }; @@ -121,6 +120,15 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { }); } +fn new_account_info(free_dollars: u128) -> Vec { + frame_system::AccountInfo { + nonce: 0u32, + consumers: 0, + providers: 0, + data: (free_dollars * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS), + }.encode() +} + #[test] fn transaction_fee_is_correct() { // This uses the exact values of substrate-node. @@ -131,14 +139,8 @@ fn transaction_fee_is_correct() { // - 1 milli-dot based on current polkadot runtime. // (this baed on assigning 0.1 CENT to the cheapest tx with `weight = 100`) let mut t = new_test_ext(compact_code_unwrap(), false); - t.insert( - >::hashed_key_for(alice()), - (0u32, 0u32, 100 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() - ); - t.insert( - >::hashed_key_for(bob()), - (0u32, 0u32, 10 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() - ); + t.insert(>::hashed_key_for(alice()), new_account_info(100)); + t.insert(>::hashed_key_for(bob()), new_account_info(10)); t.insert( >::hashed_key().to_vec(), (110 * DOLLARS).encode() diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs index 5bac6b5e374c7..ff483d9ecd8cd 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -217,7 +217,6 @@ fn should_submit_signed_twice_from_all_accounts() { #[test] fn submitted_transaction_should_be_valid() { use codec::Encode; - use frame_support::storage::StorageMap; use sp_runtime::transaction_validity::{TransactionSource, TransactionTag}; use sp_runtime::traits::StaticLookup; @@ -253,7 +252,7 @@ fn submitted_transaction_should_be_valid() { let author = extrinsic.signature.clone().unwrap().0; let address = Indices::lookup(author).unwrap(); let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; - let account = frame_system::AccountInfo { nonce: 0, refcount: 0, data }; + let account = frame_system::AccountInfo { nonce: 0, consumers: 0, providers: 0, data }; >::insert(&address, account); // check validity diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index 3686ddf27669b..3d89a68aed309 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -11,13 +11,13 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } +codec = { package = "parity-scale-codec", version = "2.0.0" } derive_more = "0.99" log = "0.4.8" -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sc-cli = { version = "0.9.0", path = "../../../client/cli" } +sc-client-api = { version = "3.0.0", path = "../../../client/api" } +sc-service = { version = "0.9.0", default-features = false, path = "../../../client/service" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } structopt = "0.3.8" diff --git a/bin/node/inspect/src/cli.rs b/bin/node/inspect/src/cli.rs index d66644bab52fa..abdbedc296d02 100644 --- a/bin/node/inspect/src/cli.rs +++ b/bin/node/inspect/src/cli.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/inspect/src/command.rs b/bin/node/inspect/src/command.rs index fae6c10c7fe78..a1a9c947a561b 100644 --- a/bin/node/inspect/src/command.rs +++ b/bin/node/inspect/src/command.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index 02f5614b81a78..2a55fdcda62ee 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. // -// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
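Throughout the executor tests above, every raw account record gains one extra u32 because frame_system::AccountInfo now tracks consumers and providers separately instead of a single refcount. A self-contained sketch of the two tuple layouts the tests insert into storage, assuming only parity-scale-codec 2.0.0 (the codec version this diff moves to):

// Standalone sketch; depends only on
// codec = { package = "parity-scale-codec", version = "2.0.0" }.
use codec::Encode;

fn main() {
    // Old layout: (nonce, refcount, free, reserved, misc_frozen, fee_frozen)
    let old = (0u32, 0u32, 111u128, 0u128, 0u128, 0u128).encode();
    // New layout: (nonce, consumers, providers, free, reserved, misc_frozen, fee_frozen)
    let new = (0u32, 0u32, 0u32, 111u128, 0u128, 0u128, 0u128).encode();
    // The extra u32 adds four bytes to each raw record the tests insert.
    assert_eq!(old.len() + 4, new.len());
    println!("old: {} bytes, new: {} bytes", old.len(), new.len());
}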
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index 305764970c149..043ec5ab21cec 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -11,14 +11,14 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-system = { version = "2.0.0", default-features = false, path = "../../../frame/system" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../../primitives/application-crypto" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +frame-system = { version = "3.0.0", default-features = false, path = "../../../frame/system" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../../primitives/application-crypto" } +sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } [dev-dependencies] -sp-serializer = { version = "2.0.0", path = "../../../primitives/serializer" } +sp-serializer = { version = "3.0.0", path = "../../../primitives/serializer" } pretty_assertions = "0.6.1" [features] diff --git a/bin/node/primitives/src/lib.rs b/bin/node/primitives/src/lib.rs index 137fb1d94c778..9470adc399f96 100644 --- a/bin/node/primitives/src/lib.rs +++ b/bin/node/primitives/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml index 9f358e901dafa..d0e8798862dba 100644 --- a/bin/node/rpc-client/Cargo.toml +++ b/bin/node/rpc-client/Cargo.toml @@ -12,9 +12,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.1.29" -hyper = "0.12.35" -jsonrpc-core-client = { version = "15.0.0", default-features = false, features = ["http"] } +hyper = "~0.13.9" +jsonrpc-core-client = { version = "15.1.0", default-features = false, features = ["http"] } log = "0.4.8" node-primitives = { version = "2.0.0", path = "../primitives" } -sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } -sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } +sp-tracing = { version = "3.0.0", path = "../../../primitives/tracing" } +sc-rpc = { version = "3.0.0", path = "../../../client/rpc" } diff --git a/bin/node/rpc-client/src/main.rs b/bin/node/rpc-client/src/main.rs index 31f1efa28ccd0..ddd8a50ad36e4 100644 --- a/bin/node/rpc-client/src/main.rs +++ b/bin/node/rpc-client/src/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index aef4a82db776a..3e0e77e030f10 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -11,28 +11,28 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpc-core = "15.0.0" +jsonrpc-core = "15.1.0" node-primitives = { version = "2.0.0", path = "../primitives" } node-runtime = { version = "2.0.0", path = "../runtime" } pallet-contracts-rpc = { version = "0.8.0", path = "../../../frame/contracts/rpc/" } -pallet-transaction-payment-rpc = { version = "2.0.0", path = "../../../frame/transaction-payment/rpc/" } -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-consensus-babe = { version = "0.8.0", path = "../../../client/consensus/babe" } -sc-consensus-babe-rpc = { version = "0.8.0", path = "../../../client/consensus/babe/rpc" } -sc-consensus-epochs = { version = "0.8.0", path = "../../../client/consensus/epochs" } -sc-chain-spec = { version = "2.0.0", path = "../../../client/chain-spec" } -sc-finality-grandpa = { version = "0.8.0", path = "../../../client/finality-grandpa" } -sc-finality-grandpa-rpc = { version = "0.8.0", path = "../../../client/finality-grandpa/rpc" } -sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } -sc-rpc-api = { version = "0.8.0", path = "../../../client/rpc-api" } -sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } -sc-sync-state-rpc = { version = "0.8.0", path = "../../../client/sync-state-rpc" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -substrate-frame-rpc-system = { version = "2.0.0", path = "../../../utils/frame/rpc/system" } +pallet-transaction-payment-rpc = { version = "3.0.0", path = "../../../frame/transaction-payment/rpc/" } +sc-client-api = { version = "3.0.0", path = "../../../client/api" } +sc-consensus-babe = { version = "0.9.0", path = "../../../client/consensus/babe" } +sc-consensus-babe-rpc = { version = "0.9.0", path = "../../../client/consensus/babe/rpc" } +sc-consensus-epochs = { version = "0.9.0", path = "../../../client/consensus/epochs" } +sc-chain-spec = { version = "3.0.0", path = "../../../client/chain-spec" } +sc-finality-grandpa = { version = "0.9.0", path = "../../../client/finality-grandpa" } +sc-finality-grandpa-rpc = { version = "0.9.0", path = "../../../client/finality-grandpa/rpc" } +sc-keystore = { version = "3.0.0", path = "../../../client/keystore" } +sc-rpc-api = { version = "0.9.0", path = "../../../client/rpc-api" } +sc-rpc = { version = "3.0.0", path = "../../../client/rpc" } +sc-sync-state-rpc = { version = "0.9.0", path = "../../../client/sync-state-rpc" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sp-block-builder = { version = "3.0.0", path = "../../../primitives/block-builder" } +sp-blockchain = { version = 
"3.0.0", path = "../../../primitives/blockchain" } +sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sp-consensus-babe = { version = "0.9.0", path = "../../../primitives/consensus/babe" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "3.0.0", path = "../../../primitives/transaction-pool" } +substrate-frame-rpc-system = { version = "3.0.0", path = "../../../utils/frame/rpc/system" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 1ced3d60ab362..e68ca6843bc94 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 80c914ff57580..f77a16a10f4c6 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-runtime" -version = "2.0.0" +version = "2.0.1" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -14,87 +14,93 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -integer-sqrt = { version = "0.1.2" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.102", optional = true } static_assertions = "1.1.0" hex-literal = { version = "0.3.1", optional = true } # primitives -sp-authority-discovery = { version = "2.0.0", default-features = false, path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0", default-features = false, path = "../../../primitives/consensus/babe" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0"} -sp-inherents = { version = "2.0.0", default-features = false, path = "../../../primitives/inherents" } +sp-authority-discovery = { version = "3.0.0", default-features = false, path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.9.0", default-features = false, path = "../../../primitives/consensus/babe" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "3.0.0"} +sp-inherents = { version = "3.0.0", default-features = false, path = "../../../primitives/inherents" } node-primitives = { version = "2.0.0", default-features = false, path = "../primitives" } -sp-offchain = { version = "2.0.0", default-features = false, path = "../../../primitives/offchain" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-api = { version = "2.0.0", default-features = false, path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../../primitives/staking" } -sp-keyring = { version = "2.0.0", optional = true, path = "../../../primitives/keyring" } -sp-session = { version = "2.0.0", 
default-features = false, path = "../../../primitives/session" } -sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0", default-features = false, path = "../../../primitives/version" } +sp-offchain = { version = "3.0.0", default-features = false, path = "../../../primitives/offchain" } +sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } +sp-api = { version = "3.0.0", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "3.0.0", default-features = false, path = "../../../primitives/staking" } +sp-keyring = { version = "3.0.0", optional = true, path = "../../../primitives/keyring" } +sp-session = { version = "3.0.0", default-features = false, path = "../../../primitives/session" } +sp-transaction-pool = { version = "3.0.0", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "3.0.0", default-features = false, path = "../../../primitives/version" } # frame dependencies -frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } -frame-system = { version = "2.0.0", default-features = false, path = "../../../frame/system" } -frame-system-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } -frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } -pallet-authority-discovery = { version = "2.0.0", default-features = false, path = "../../../frame/authority-discovery" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../../../frame/authorship" } -pallet-babe = { version = "2.0.0", default-features = false, path = "../../../frame/babe" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../../../frame/balances" } -pallet-collective = { version = "2.0.0", default-features = false, path = "../../../frame/collective" } +frame-executive = { version = "3.0.0", default-features = false, path = "../../../frame/executive" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-support = { version = "3.0.0", default-features = false, path = "../../../frame/support" } +frame-system = { version = "3.0.0", default-features = false, path = "../../../frame/system" } +frame-system-benchmarking = { version = "3.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } +frame-system-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-assets = { version = "3.0.0", default-features = false, path = "../../../frame/assets" } +pallet-authority-discovery = { version = "3.0.0", default-features = false, path = "../../../frame/authority-discovery" } +pallet-authorship = { version = "3.0.0", default-features = false, path = "../../../frame/authorship" } +pallet-babe = { version = 
"3.0.0", default-features = false, path = "../../../frame/babe" } +pallet-balances = { version = "3.0.0", default-features = false, path = "../../../frame/balances" } +pallet-bounties = { version = "3.0.0", default-features = false, path = "../../../frame/bounties" } +pallet-collective = { version = "3.0.0", default-features = false, path = "../../../frame/collective" } pallet-contracts = { version = "2.0.0", default-features = false, path = "../../../frame/contracts" } pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "../../../frame/contracts/common/" } pallet-contracts-rpc-runtime-api = { version = "0.8.0", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } -pallet-democracy = { version = "2.0.0", default-features = false, path = "../../../frame/democracy" } -pallet-elections-phragmen = { version = "2.0.0", default-features = false, path = "../../../frame/elections-phragmen" } -pallet-grandpa = { version = "2.0.0", default-features = false, path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0", default-features = false, path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0", default-features = false, path = "../../../frame/indices" } -pallet-identity = { version = "2.0.0", default-features = false, path = "../../../frame/identity" } -pallet-membership = { version = "2.0.0", default-features = false, path = "../../../frame/membership" } -pallet-multisig = { version = "2.0.0", default-features = false, path = "../../../frame/multisig" } -pallet-offences = { version = "2.0.0", default-features = false, path = "../../../frame/offences" } -pallet-offences-benchmarking = { version = "2.0.0", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } -pallet-proxy = { version = "2.0.0", default-features = false, path = "../../../frame/proxy" } -pallet-randomness-collective-flip = { version = "2.0.0", default-features = false, path = "../../../frame/randomness-collective-flip" } -pallet-recovery = { version = "2.0.0", default-features = false, path = "../../../frame/recovery" } -pallet-session = { version = "2.0.0", features = ["historical"], path = "../../../frame/session", default-features = false } -pallet-session-benchmarking = { version = "2.0.0", path = "../../../frame/session/benchmarking", default-features = false, optional = true } -pallet-staking = { version = "2.0.0", default-features = false, path = "../../../frame/staking" } -pallet-staking-reward-curve = { version = "2.0.0", default-features = false, path = "../../../frame/staking/reward-curve" } -pallet-scheduler = { version = "2.0.0", default-features = false, path = "../../../frame/scheduler" } -pallet-society = { version = "2.0.0", default-features = false, path = "../../../frame/society" } -pallet-sudo = { version = "2.0.0", default-features = false, path = "../../../frame/sudo" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } -pallet-treasury = { version = "2.0.0", default-features = false, path = "../../../frame/treasury" } -pallet-utility = { version = "2.0.0", default-features = false, path = "../../../frame/utility" } -pallet-transaction-payment = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -pallet-vesting = { version = "2.0.0", 
default-features = false, path = "../../../frame/vesting" } +pallet-democracy = { version = "3.0.0", default-features = false, path = "../../../frame/democracy" } +pallet-elections-phragmen = { version = "3.0.0", default-features = false, path = "../../../frame/elections-phragmen" } +pallet-grandpa = { version = "3.0.0", default-features = false, path = "../../../frame/grandpa" } +pallet-im-online = { version = "3.0.0", default-features = false, path = "../../../frame/im-online" } +pallet-indices = { version = "3.0.0", default-features = false, path = "../../../frame/indices" } +pallet-identity = { version = "3.0.0", default-features = false, path = "../../../frame/identity" } +pallet-lottery = { version = "3.0.0", default-features = false, path = "../../../frame/lottery" } +pallet-membership = { version = "3.0.0", default-features = false, path = "../../../frame/membership" } +pallet-mmr = { version = "3.0.0", default-features = false, path = "../../../frame/merkle-mountain-range" } +pallet-multisig = { version = "3.0.0", default-features = false, path = "../../../frame/multisig" } +pallet-offences = { version = "3.0.0", default-features = false, path = "../../../frame/offences" } +pallet-offences-benchmarking = { version = "3.0.0", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } +pallet-proxy = { version = "3.0.0", default-features = false, path = "../../../frame/proxy" } +pallet-randomness-collective-flip = { version = "3.0.0", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-recovery = { version = "3.0.0", default-features = false, path = "../../../frame/recovery" } +pallet-session = { version = "3.0.0", features = ["historical"], path = "../../../frame/session", default-features = false } +pallet-session-benchmarking = { version = "3.0.0", path = "../../../frame/session/benchmarking", default-features = false, optional = true } +pallet-staking = { version = "3.0.0", default-features = false, path = "../../../frame/staking" } +pallet-staking-reward-curve = { version = "3.0.0", default-features = false, path = "../../../frame/staking/reward-curve" } +pallet-scheduler = { version = "3.0.0", default-features = false, path = "../../../frame/scheduler" } +pallet-society = { version = "3.0.0", default-features = false, path = "../../../frame/society" } +pallet-sudo = { version = "3.0.0", default-features = false, path = "../../../frame/sudo" } +pallet-timestamp = { version = "3.0.0", default-features = false, path = "../../../frame/timestamp" } +pallet-tips = { version = "3.0.0", default-features = false, path = "../../../frame/tips" } +pallet-treasury = { version = "3.0.0", default-features = false, path = "../../../frame/treasury" } +pallet-utility = { version = "3.0.0", default-features = false, path = "../../../frame/utility" } +pallet-transaction-payment = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-payment" } +pallet-transaction-payment-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } +pallet-vesting = { version = "3.0.0", default-features = false, path = "../../../frame/vesting" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "4.0.0", path = "../../../utils/wasm-builder" } [dev-dependencies] -sp-io = { version = "2.0.0", path = 
"../../../primitives/io" } +sp-io = { version = "3.0.0", path = "../../../primitives/io" } [features] default = ["std"] with-tracing = [ "frame-executive/with-tracing" ] std = [ "sp-authority-discovery/std", + "pallet-assets/std", "pallet-authority-discovery/std", "pallet-authorship/std", "sp-consensus-babe/std", "pallet-babe/std", "pallet-balances/std", + "pallet-bounties/std", "sp-block-builder/std", "codec/std", "pallet-collective/std", @@ -108,7 +114,9 @@ std = [ "pallet-im-online/std", "pallet-indices/std", "sp-inherents/std", + "pallet-lottery/std", "pallet-membership/std", + "pallet-mmr/std", "pallet-multisig/std", "pallet-identity/std", "pallet-scheduler/std", @@ -133,6 +141,7 @@ std = [ "frame-system-rpc-runtime-api/std", "frame-system/std", "pallet-timestamp/std", + "pallet-tips/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", "pallet-treasury/std", @@ -148,8 +157,10 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-babe/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-bounties/runtime-benchmarks", "pallet-collective/runtime-benchmarks", "pallet-contracts/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", @@ -158,12 +169,15 @@ runtime-benchmarks = [ "pallet-identity/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-indices/runtime-benchmarks", + "pallet-lottery/runtime-benchmarks", + "pallet-mmr/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-society/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-tips/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", diff --git a/bin/node/runtime/build.rs b/bin/node/runtime/build.rs index 4f111bc993007..a1c4b2d892cfe 100644 --- a/bin/node/runtime/build.rs +++ b/bin/node/runtime/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build() diff --git a/bin/node/runtime/src/constants.rs b/bin/node/runtime/src/constants.rs index 8e87d61c1e6b5..f447486c7ffc4 100644 --- a/bin/node/runtime/src/constants.rs +++ b/bin/node/runtime/src/constants.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -50,7 +50,7 @@ pub mod time { /// always be assigned, in which case `MILLISECS_PER_BLOCK` and /// `SLOT_DURATION` should have the same value. 
/// - /// + /// pub const MILLISECS_PER_BLOCK: Moment = 3000; pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000; diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 16666997b3a55..c6a56e5ac0dab 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,13 +34,15 @@ mod multiplier_tests { use crate::{ constants::{currency::*, time::*}, - TransactionPayment, MaximumBlockWeight, AvailableBlockRatio, Runtime, TargetBlockFullness, + TransactionPayment, Runtime, TargetBlockFullness, AdjustmentVariable, System, MinimumMultiplier, + RuntimeBlockWeights as BlockWeights, }; - use frame_support::weights::{Weight, WeightToFeePolynomial}; + use frame_support::weights::{Weight, WeightToFeePolynomial, DispatchClass}; - fn max() -> Weight { - AvailableBlockRatio::get() * MaximumBlockWeight::get() + fn max_normal() -> Weight { + BlockWeights::get().get(DispatchClass::Normal).max_total + .unwrap_or_else(|| BlockWeights::get().max_block) } fn min_multiplier() -> Multiplier { @@ -48,7 +50,7 @@ mod multiplier_tests { } fn target() -> Weight { - TargetBlockFullness::get() * max() + TargetBlockFullness::get() * max_normal() } // update based on runtime impl. @@ -69,7 +71,7 @@ mod multiplier_tests { let previous_float = previous_float.max(min_multiplier().into_inner() as f64 / accuracy); // maximum tx weight - let m = max() as f64; + let m = max_normal() as f64; // block weight always truncated to max weight let block_weight = (block_weight as f64).min(m); let v: f64 = AdjustmentVariable::get().to_fraction(); @@ -89,7 +91,7 @@ mod multiplier_tests { let mut t: sp_io::TestExternalities = frame_system::GenesisConfig::default().build_storage::().unwrap().into(); t.execute_with(|| { - System::set_block_limits(w, 0); + System::set_block_consumed_resources(w, 0); assertions() }); } @@ -102,8 +104,8 @@ mod multiplier_tests { (100, fm.clone()), (1000, fm.clone()), (target(), fm.clone()), - (max() / 2, fm.clone()), - (max(), fm.clone()), + (max_normal() / 2, fm.clone()), + (max_normal(), fm.clone()), ]; test_set.into_iter().for_each(|(w, fm)| { run_with_system_weight(w, || { @@ -164,7 +166,7 @@ mod multiplier_tests { #[test] fn min_change_per_day() { - run_with_system_weight(max(), || { + run_with_system_weight(max_normal(), || { let mut fm = Multiplier::one(); // See the example in the doc of `TargetedFeeAdjustment`. are at least 0.234, hence // `fm > 1.234`. @@ -182,7 +184,7 @@ mod multiplier_tests { // `cargo test congested_chain_simulation -- --nocapture` to get some insight. // almost full. The entire quota of normal transactions is taken. - let block_weight = AvailableBlockRatio::get() * max() - 100; + let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - 100; // Default substrate weight. let tx_weight = frame_support::weights::constants::ExtrinsicBaseWeight::get(); @@ -200,7 +202,7 @@ mod multiplier_tests { fm = next; iterations += 1; let fee = - ::WeightToFee::calc(&tx_weight); + ::WeightToFee::calc(&tx_weight); let adjusted_fee = fm.saturating_mul_acc_int(fee); println!( "iteration {}, new fm = {:?}. 
Fee at this point is: {} units / {} millicents, \ @@ -320,15 +322,19 @@ mod multiplier_tests { 10 * mb, 2147483647, 4294967295, - MaximumBlockWeight::get() / 2, - MaximumBlockWeight::get(), + BlockWeights::get().max_block / 2, + BlockWeights::get().max_block, Weight::max_value() / 2, Weight::max_value(), ].into_iter().for_each(|i| { run_with_system_weight(i, || { let next = runtime_multiplier_update(Multiplier::one()); let truth = truth_value_update(i, Multiplier::one()); - assert_eq_error_rate!(truth, next, Multiplier::from_inner(50_000_000)); + assert_eq_error_rate!( + truth, + next, + Multiplier::from_inner(50_000_000) + ); }); }); diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index dddef0b46a884..58c98e529c319 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! The Substrate runtime. This can be compiled with ``#[no_std]`, ready for Wasm. +//! The Substrate runtime. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. @@ -28,14 +28,17 @@ use frame_support::{ construct_runtime, parameter_types, debug, RuntimeDebug, weights::{ Weight, IdentityFee, - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, DispatchClass, }, traits::{ Currency, Imbalance, KeyOwnerProofSystem, OnUnbalanced, Randomness, LockIdentifier, U128CurrencyToVote, }, }; -use frame_system::{EnsureRoot, EnsureOneOf}; +use frame_system::{ + EnsureRoot, EnsureOneOf, + limits::{BlockWeights, BlockLength} +}; use frame_support::traits::InstanceFilter; use codec::{Encode, Decode}; use sp_core::{ @@ -54,7 +57,7 @@ use sp_runtime::curve::PiecewiseLinear; use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority}; use sp_runtime::traits::{ self, BlakeTwo256, Block as BlockT, StaticLookup, SaturatedConversion, - ConvertInto, OpaqueKeys, NumberFor, Saturating, + ConvertInto, OpaqueKeys, NumberFor, }; use sp_version::RuntimeVersion; #[cfg(any(feature = "std", test))] @@ -63,11 +66,12 @@ use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthority use pallet_grandpa::fg_primitives; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; +use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; pub use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment, CurrencyAdapter}; use pallet_session::{historical as pallet_session_historical}; use sp_inherents::{InherentData, CheckInherentsResult}; use static_assertions::const_assert; +use pallet_contracts::WeightInfo; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; @@ -87,18 +91,15 @@ pub mod constants; use constants::{time::*, currency::*}; use sp_runtime::generic::Era; -/// Weights for pallets used in the 
runtime. -mod weights; - // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { WASM_BINARY.expect("Development wasm binary is not available. This means the client is \ - built with `BUILD_DUMMY_WASM_BINARY` flag and it is only usable for \ + built with `SKIP_WASM_BUILD` flag and it is only usable for \ production chains. Please rebuild with the flag disabled.") } @@ -111,10 +112,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 260, + spec_version: 264, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + transaction_version: 2, }; /// Native version. @@ -144,23 +145,48 @@ impl OnUnbalanced for DealWithFees { } } -const AVERAGE_ON_INITIALIZE_WEIGHT: Perbill = Perbill::from_percent(10); +/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. +/// This is used to limit the maximal weight of a single extrinsic. +const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); +/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used +/// by Operational extrinsics. +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// We allow for 2 seconds of compute with a 6 second average block time. +const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; + parameter_types! { pub const BlockHashCount: BlockNumber = 2400; - /// We allow for 2 seconds of compute with a 6 second average block time. - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - /// Assume 10% of weight for average on_initialize calls. - pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get().saturating_sub(AVERAGE_ON_INITIALIZE_WEIGHT) - * MaximumBlockWeight::get(); - pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; pub const Version: RuntimeVersion = VERSION; -} - -const_assert!(AvailableBlockRatio::get().deconstruct() >= AVERAGE_ON_INITIALIZE_WEIGHT.deconstruct()); - -impl frame_system::Trait for Runtime { + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. 
+ weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + pub const SS58Prefix: u8 = 42; +} + +const_assert!(NORMAL_DISPATCH_RATIO.deconstruct() >= AVERAGE_ON_INITIALIZE_RATIO.deconstruct()); + +impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type DbWeight = RocksDbWeight; type Origin = Origin; type Call = Call; type Index = Index; @@ -172,22 +198,16 @@ impl frame_system::Trait for Runtime { type Header = generic::Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = frame_system::weights::SubstrateWeight; + type SS58Prefix = SS58Prefix; } -impl pallet_utility::Trait for Runtime { +impl pallet_utility::Config for Runtime { type Event = Event; type Call = Call; type WeightInfo = pallet_utility::weights::SubstrateWeight; @@ -201,7 +221,7 @@ parameter_types! { pub const MaxSignatories: u16 = 100; } -impl pallet_multisig::Trait for Runtime { +impl pallet_multisig::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -264,7 +284,7 @@ impl InstanceFilter for ProxyType { } } -impl pallet_proxy::Trait for Runtime { +impl pallet_proxy::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -280,11 +300,12 @@ impl pallet_proxy::Trait for Runtime { } parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * + RuntimeBlockWeights::get().max_block; pub const MaxScheduledPerBlock: u32 = 50; } -impl pallet_scheduler::Trait for Runtime { +impl pallet_scheduler::Config for Runtime { type Event = Event; type Origin = Origin; type PalletsOrigin = OriginCaller; @@ -298,9 +319,11 @@ impl pallet_scheduler::Trait for Runtime { parameter_types! { pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS; pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; + pub const ReportLongevity: u64 = + BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * EpochDuration::get(); } -impl pallet_babe::Trait for Runtime { +impl pallet_babe::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; type EpochChangeTrigger = pallet_babe::ExternalTrigger; @@ -318,7 +341,7 @@ impl pallet_babe::Trait for Runtime { )>>::IdentificationTuple; type HandleEquivocation = - pallet_babe::EquivocationHandler; + pallet_babe::EquivocationHandler; type WeightInfo = (); } @@ -327,7 +350,7 @@ parameter_types! { pub const IndexDeposit: Balance = 1 * DOLLARS; } -impl pallet_indices::Trait for Runtime { +impl pallet_indices::Config for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; @@ -342,7 +365,7 @@ parameter_types! 
{ pub const MaxLocks: u32 = 50; } -impl pallet_balances::Trait for Runtime { +impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; type Balance = Balance; type DustRemoval = (); @@ -359,7 +382,7 @@ parameter_types! { pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000_000u128); } -impl pallet_transaction_payment::Trait for Runtime { +impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; @@ -371,7 +394,7 @@ parameter_types! { pub const MinimumPeriod: Moment = SLOT_DURATION / 2; } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { type Moment = Moment; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; @@ -382,7 +405,7 @@ parameter_types! { pub const UncleGenerations: BlockNumber = 5; } -impl pallet_authorship::Trait for Runtime { +impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -402,9 +425,9 @@ parameter_types! { pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl pallet_session::Trait for Runtime { +impl pallet_session::Config for Runtime { type Event = Event; - type ValidatorId = ::AccountId; + type ValidatorId = ::AccountId; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; type NextSessionRotation = Babe; @@ -415,7 +438,7 @@ impl pallet_session::Trait for Runtime { type WeightInfo = pallet_session::weights::SubstrateWeight; } -impl pallet_session::historical::Trait for Runtime { +impl pallet_session::historical::Config for Runtime { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -441,12 +464,13 @@ parameter_types! { pub const MaxIterations: u32 = 10; // 0.05%. The higher the value, the more strict solution acceptance becomes. pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000); - pub OffchainSolutionWeightLimit: Weight = MaximumExtrinsicWeight::get() - .saturating_sub(BlockExecutionWeight::get()) - .saturating_sub(ExtrinsicBaseWeight::get()); + pub OffchainSolutionWeightLimit: Weight = RuntimeBlockWeights::get() + .get(DispatchClass::Normal) + .max_extrinsic.expect("Normal extrinsics have a weight limit configured; qed") + .saturating_sub(BlockExecutionWeight::get()); } -impl pallet_staking::Trait for Runtime { +impl pallet_staking::Config for Runtime { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = U128CurrencyToVote; @@ -492,7 +516,7 @@ parameter_types! { pub const MaxProposals: u32 = 100; } -impl pallet_democracy::Trait for Runtime { +impl pallet_democracy::Config for Runtime { type Proposal = Call; type Event = Event; type Currency = Balances; @@ -544,7 +568,7 @@ parameter_types! { } type CouncilCollective = pallet_collective::Instance1; -impl pallet_collective::Trait for Runtime { +impl pallet_collective::Config for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -557,7 +581,10 @@ impl pallet_collective::Trait for Runtime { parameter_types! { pub const CandidacyBond: Balance = 10 * DOLLARS; - pub const VotingBond: Balance = 1 * DOLLARS; + // 1 storage item created, key size is 32 bytes, value size is 16+16. + pub const VotingBondBase: Balance = deposit(1, 64); + // additional data per vote is 32 bytes (account id). 
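+	// `deposit(items, bytes)` is the runtime's storage-pricing helper: the base bond
+	// pays for the single 64-byte item above, while the per-vote factor below pays
+	// for each additional 32 bytes recorded.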
+ pub const VotingBondFactor: Balance = deposit(0, 32); pub const TermDuration: BlockNumber = 7 * DAYS; pub const DesiredMembers: u32 = 13; pub const DesiredRunnersUp: u32 = 7; @@ -567,7 +594,7 @@ parameter_types! { // Make sure that there are no more than `MaxMembers` members elected via elections-phragmen. const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get()); -impl pallet_elections_phragmen::Trait for Runtime { +impl pallet_elections_phragmen::Config for Runtime { type Event = Event; type ModuleId = ElectionsPhragmenModuleId; type Currency = Balances; @@ -577,9 +604,9 @@ impl pallet_elections_phragmen::Trait for Runtime { type InitializeMembers = Council; type CurrencyToVote = U128CurrencyToVote; type CandidacyBond = CandidacyBond; - type VotingBond = VotingBond; + type VotingBondBase = VotingBondBase; + type VotingBondFactor = VotingBondFactor; type LoserCandidate = (); - type BadReport = (); type KickedMember = (); type DesiredMembers = DesiredMembers; type DesiredRunnersUp = DesiredRunnersUp; @@ -594,7 +621,7 @@ parameter_types! { } type TechnicalCollective = pallet_collective::Instance2; -impl pallet_collective::Trait for Runtime { +impl pallet_collective::Config for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -610,7 +637,7 @@ type EnsureRootOrHalfCouncil = EnsureOneOf< EnsureRoot, pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> >; -impl pallet_membership::Trait for Runtime { +impl pallet_membership::Config for Runtime { type Event = Event; type AddOrigin = EnsureRootOrHalfCouncil; type RemoveOrigin = EnsureRootOrHalfCouncil; @@ -639,7 +666,7 @@ parameter_types! { pub const BountyValueMinimum: Balance = 5 * DOLLARS; } -impl pallet_treasury::Trait for Runtime { +impl pallet_treasury::Config for Runtime { type ModuleId = TreasuryModuleId; type Currency = Balances; type ApproveOrigin = EnsureOneOf< @@ -652,55 +679,87 @@ impl pallet_treasury::Trait for Runtime { EnsureRoot, pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> >; - type Tippers = Elections; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type DataDepositPerByte = DataDepositPerByte; type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; + type BurnDestination = (); + type SpendFunds = Bounties; + type WeightInfo = pallet_treasury::weights::SubstrateWeight; +} + +impl pallet_bounties::Config for Runtime { + type Event = Event; type BountyDepositBase = BountyDepositBase; type BountyDepositPayoutDelay = BountyDepositPayoutDelay; type BountyUpdatePeriod = BountyUpdatePeriod; type BountyCuratorDeposit = BountyCuratorDeposit; type BountyValueMinimum = BountyValueMinimum; + type DataDepositPerByte = DataDepositPerByte; type MaximumReasonLength = MaximumReasonLength; - type BurnDestination = (); - type WeightInfo = pallet_treasury::weights::SubstrateWeight; + type WeightInfo = pallet_bounties::weights::SubstrateWeight; } -parameter_types! 
{ - pub const TombstoneDeposit: Balance = 16 * MILLICENTS; - pub const RentByteFee: Balance = 4 * MILLICENTS; - pub const RentDepositOffset: Balance = 1000 * MILLICENTS; - pub const SurchargeReward: Balance = 150 * MILLICENTS; +impl pallet_tips::Config for Runtime { + type Event = Event; + type DataDepositPerByte = DataDepositPerByte; + type MaximumReasonLength = MaximumReasonLength; + type Tippers = Elections; + type TipCountdown = TipCountdown; + type TipFindersFee = TipFindersFee; + type TipReportDepositBase = TipReportDepositBase; + type WeightInfo = pallet_tips::weights::SubstrateWeight; } -impl pallet_contracts::Trait for Runtime { +parameter_types! { + pub const TombstoneDeposit: Balance = deposit( + 1, + sp_std::mem::size_of::>() as u32 + ); + pub const DepositPerContract: Balance = TombstoneDeposit::get(); + pub const DepositPerStorageByte: Balance = deposit(0, 1); + pub const DepositPerStorageItem: Balance = deposit(1, 0); + pub RentFraction: Perbill = Perbill::from_rational_approximation(1u32, 30 * DAYS); + pub const SurchargeReward: Balance = 150 * MILLICENTS; + pub const SignedClaimHandicap: u32 = 2; + pub const MaxDepth: u32 = 32; + pub const MaxValueSize: u32 = 16 * 1024; + // The lazy deletion runs inside on_initialize. + pub DeletionWeightLimit: Weight = AVERAGE_ON_INITIALIZE_RATIO * + RuntimeBlockWeights::get().max_block; + // The weight needed for decoding the queue should be less or equal than a fifth + // of the overall weight dedicated to the lazy deletion. + pub DeletionQueueDepth: u32 = ((DeletionWeightLimit::get() / ( + ::WeightInfo::on_initialize_per_queue_item(1) - + ::WeightInfo::on_initialize_per_queue_item(0) + )) / 5) as u32; +} + +impl pallet_contracts::Config for Runtime { type Time = Timestamp; type Randomness = RandomnessCollectiveFlip; type Currency = Balances; type Event = Event; - type DetermineContractAddress = pallet_contracts::SimpleAddressDeterminer; - type TrieIdGenerator = pallet_contracts::TrieIdFromParentCounter; type RentPayment = (); - type SignedClaimHandicap = pallet_contracts::DefaultSignedClaimHandicap; + type SignedClaimHandicap = SignedClaimHandicap; type TombstoneDeposit = TombstoneDeposit; - type StorageSizeOffset = pallet_contracts::DefaultStorageSizeOffset; - type RentByteFee = RentByteFee; - type RentDepositOffset = RentDepositOffset; + type DepositPerContract = DepositPerContract; + type DepositPerStorageByte = DepositPerStorageByte; + type DepositPerStorageItem = DepositPerStorageItem; + type RentFraction = RentFraction; type SurchargeReward = SurchargeReward; - type MaxDepth = pallet_contracts::DefaultMaxDepth; - type MaxValueSize = pallet_contracts::DefaultMaxValueSize; + type MaxDepth = MaxDepth; + type MaxValueSize = MaxValueSize; type WeightPrice = pallet_transaction_payment::Module; - type WeightInfo = weights::pallet_contracts::WeightInfo; + type WeightInfo = pallet_contracts::weights::SubstrateWeight; + type ChainExtension = (); + type DeletionQueueDepth = DeletionQueueDepth; + type DeletionWeightLimit = DeletionWeightLimit; } -impl pallet_sudo::Trait for Runtime { +impl pallet_sudo::Config for Runtime { type Event = Event; type Call = Call; } @@ -770,9 +829,10 @@ impl frame_system::offchain::SendTransactionTypes for Runtime where type OverarchingCall = Call; } -impl pallet_im_online::Trait for Runtime { +impl pallet_im_online::Config for Runtime { type AuthorityId = ImOnlineId; type Event = Event; + type ValidatorSet = Historical; type SessionDuration = SessionDuration; type ReportUnresponsiveness = Offences; type 
UnsignedPriority = ImOnlineUnsignedPriority; @@ -780,19 +840,20 @@ impl pallet_im_online::Trait for Runtime { } parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * + RuntimeBlockWeights::get().max_block; } -impl pallet_offences::Trait for Runtime { +impl pallet_offences::Config for Runtime { type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; } -impl pallet_authority_discovery::Trait for Runtime {} +impl pallet_authority_discovery::Config for Runtime {} -impl pallet_grandpa::Trait for Runtime { +impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -807,7 +868,7 @@ impl pallet_grandpa::Trait for Runtime { )>>::IdentificationTuple; type HandleEquivocation = - pallet_grandpa::EquivocationHandler; + pallet_grandpa::EquivocationHandler; type WeightInfo = (); } @@ -821,7 +882,7 @@ parameter_types! { pub const MaxRegistrars: u32 = 20; } -impl pallet_identity::Trait for Runtime { +impl pallet_identity::Config for Runtime { type Event = Event; type Currency = Balances; type BasicDeposit = BasicDeposit; @@ -843,7 +904,7 @@ parameter_types! { pub const RecoveryDeposit: Balance = 5 * DOLLARS; } -impl pallet_recovery::Trait for Runtime { +impl pallet_recovery::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -864,7 +925,7 @@ parameter_types! { pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); } -impl pallet_society::Trait for Runtime { +impl pallet_society::Config for Runtime { type Event = Event; type ModuleId = SocietyModuleId; type Currency = Balances; @@ -885,7 +946,7 @@ parameter_types! { pub const MinVestedTransfer: Balance = 100 * DOLLARS; } -impl pallet_vesting::Trait for Runtime { +impl pallet_vesting::Config for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; @@ -893,6 +954,56 @@ impl pallet_vesting::Trait for Runtime { type WeightInfo = pallet_vesting::weights::SubstrateWeight; } +impl pallet_mmr::Config for Runtime { + const INDEXING_PREFIX: &'static [u8] = b"mmr"; + type Hashing = ::Hashing; + type Hash = ::Hash; + type LeafData = frame_system::Module; + type OnNewRoot = (); + type WeightInfo = (); +} + +parameter_types! { + pub const LotteryModuleId: ModuleId = ModuleId(*b"py/lotto"); + pub const MaxCalls: usize = 10; + pub const MaxGenerateRandom: u32 = 10; +} + +impl pallet_lottery::Config for Runtime { + type ModuleId = LotteryModuleId; + type Call = Call; + type Event = Event; + type Currency = Balances; + type Randomness = RandomnessCollectiveFlip; + type ManagerOrigin = EnsureRoot; + type MaxCalls = MaxCalls; + type ValidateCall = Lottery; + type MaxGenerateRandom = MaxGenerateRandom; + type WeightInfo = pallet_lottery::weights::SubstrateWeight; +} + +parameter_types! 
{ + pub const AssetDepositBase: Balance = 100 * DOLLARS; + pub const AssetDepositPerZombie: Balance = 1 * DOLLARS; + pub const StringLimit: u32 = 50; + pub const MetadataDepositBase: Balance = 10 * DOLLARS; + pub const MetadataDepositPerByte: Balance = 1 * DOLLARS; +} + +impl pallet_assets::Config for Runtime { + type Event = Event; + type Balance = u64; + type AssetId = u32; + type Currency = Balances; + type ForceOrigin = EnsureRoot; + type AssetDepositBase = AssetDepositBase; + type AssetDepositPerZombie = AssetDepositPerZombie; + type StringLimit = StringLimit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type WeightInfo = pallet_assets::weights::SubstrateWeight; +} + construct_runtime!( pub enum Runtime where Block = Block, @@ -930,11 +1041,16 @@ construct_runtime!( Scheduler: pallet_scheduler::{Module, Call, Storage, Event}, Proxy: pallet_proxy::{Module, Call, Storage, Event}, Multisig: pallet_multisig::{Module, Call, Storage, Event}, + Bounties: pallet_bounties::{Module, Call, Storage, Event}, + Tips: pallet_tips::{Module, Call, Storage, Event}, + Assets: pallet_assets::{Module, Call, Storage, Event}, + Mmr: pallet_mmr::{Module, Storage}, + Lottery: pallet_lottery::{Module, Call, Storage, Event}, } ); /// The address format for describing accounts. -pub type Address = ::Source; +pub type Address = sp_runtime::MultiAddress; /// Block header type as expected by this runtime. pub type Header = generic::Header; /// Block type as expected by this runtime. @@ -966,6 +1082,20 @@ pub type CheckedExtrinsic = generic::CheckedExtrinsic, Runtime, AllModules>; +/// MMR helper types. +mod mmr { + use super::Runtime; + pub use pallet_mmr::primitives::*; + + pub type Leaf = < + ::LeafData + as + LeafDataProvider + >::LeafData; + pub type Hash = ::Hash; + pub type Hashing = ::Hashing; +} + impl_runtime_apis! { impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { @@ -1073,12 +1203,20 @@ impl_runtime_apis! { } } - fn current_epoch_start() -> sp_consensus_babe::SlotNumber { + fn current_epoch_start() -> sp_consensus_babe::Slot { Babe::current_epoch_start() } + fn current_epoch() -> sp_consensus_babe::Epoch { + Babe::current_epoch() + } + + fn next_epoch() -> sp_consensus_babe::Epoch { + Babe::next_epoch() + } + fn generate_key_ownership_proof( - _slot_number: sp_consensus_babe::SlotNumber, + _slot: sp_consensus_babe::Slot, authority_id: sp_consensus_babe::AuthorityId, ) -> Option { use codec::Encode; @@ -1147,6 +1285,32 @@ impl_runtime_apis! { fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { TransactionPayment::query_info(uxt, len) } + fn query_fee_details(uxt: ::Extrinsic, len: u32) -> FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + } + + impl pallet_mmr::primitives::MmrApi< + Block, + mmr::Leaf, + mmr::Hash, + > for Runtime { + fn generate_proof(leaf_index: u64) -> Result<(mmr::Leaf, mmr::Proof), mmr::Error> { + Mmr::generate_proof(leaf_index) + } + + fn verify_proof(leaf: mmr::Leaf, proof: mmr::Proof) -> Result<(), mmr::Error> { + Mmr::verify_leaf(leaf, proof) + } + + fn verify_proof_stateless( + root: mmr::Hash, + leaf: Vec, + proof: mmr::Proof + ) -> Result<(), mmr::Error> { + let node = mmr::DataOrHash::Data(mmr::OpaqueLeaf(leaf)); + pallet_mmr::verify_leaf_proof::(root, node, proof) + } } impl sp_session::SessionKeys for Runtime { @@ -1174,9 +1338,9 @@ impl_runtime_apis! 
{ use pallet_offences_benchmarking::Module as OffencesBench; use frame_system_benchmarking::Module as SystemBench; - impl pallet_session_benchmarking::Trait for Runtime {} - impl pallet_offences_benchmarking::Trait for Runtime {} - impl frame_system_benchmarking::Trait for Runtime {} + impl pallet_session_benchmarking::Config for Runtime {} + impl pallet_offences_benchmarking::Config for Runtime {} + impl frame_system_benchmarking::Config for Runtime {} let whitelist: Vec = vec![ // Block Number @@ -1196,8 +1360,10 @@ impl_runtime_apis! { let mut batches = Vec::::new(); let params = (&config, &whitelist); + add_benchmark!(params, batches, pallet_assets, Assets); add_benchmark!(params, batches, pallet_babe, Babe); add_benchmark!(params, batches, pallet_balances, Balances); + add_benchmark!(params, batches, pallet_bounties, Bounties); add_benchmark!(params, batches, pallet_collective, Council); add_benchmark!(params, batches, pallet_contracts, Contracts); add_benchmark!(params, batches, pallet_democracy, Democracy); @@ -1206,6 +1372,8 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_identity, Identity); add_benchmark!(params, batches, pallet_im_online, ImOnline); add_benchmark!(params, batches, pallet_indices, Indices); + add_benchmark!(params, batches, pallet_lottery, Lottery); + add_benchmark!(params, batches, pallet_mmr, Mmr); add_benchmark!(params, batches, pallet_multisig, Multisig); add_benchmark!(params, batches, pallet_offences, OffencesBench::); add_benchmark!(params, batches, pallet_proxy, Proxy); @@ -1214,6 +1382,7 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_staking, Staking); add_benchmark!(params, batches, frame_system, SystemBench::); add_benchmark!(params, batches, pallet_timestamp, Timestamp); + add_benchmark!(params, batches, pallet_tips, Tips); add_benchmark!(params, batches, pallet_treasury, Treasury); add_benchmark!(params, batches, pallet_utility, Utility); add_benchmark!(params, batches, pallet_vesting, Vesting); diff --git a/bin/node/runtime/src/weights/pallet_contracts.rs b/bin/node/runtime/src/weights/pallet_contracts.rs deleted file mode 100644 index 8cd97b4a72191..0000000000000 --- a/bin/node/runtime/src/weights/pallet_contracts.rs +++ /dev/null @@ -1,294 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Weights for pallet_contracts -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! 
DATE: 2020-10-06, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_contracts::WeightInfo for WeightInfo { - fn update_schedule() -> Weight { - (33_207_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn put_code(n: u32, ) -> Weight { - (0 as Weight) - .saturating_add((144_833_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn instantiate(n: u32, ) -> Weight { - (223_974_000 as Weight) - .saturating_add((1_007_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn call() -> Weight { - (210_638_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn claim_surcharge() -> Weight { - (508_079_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn seal_caller(r: u32, ) -> Weight { - (143_336_000 as Weight) - .saturating_add((397_788_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_address(r: u32, ) -> Weight { - (147_296_000 as Weight) - .saturating_add((396_962_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_gas_left(r: u32, ) -> Weight { - (141_677_000 as Weight) - .saturating_add((393_308_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_balance(r: u32, ) -> Weight { - (157_556_000 as Weight) - .saturating_add((879_861_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } - fn seal_value_transferred(r: u32, ) -> Weight { - (148_867_000 as Weight) - .saturating_add((391_678_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_minimum_balance(r: u32, ) -> Weight { - (147_252_000 as Weight) - .saturating_add((393_977_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_tombstone_deposit(r: u32, ) -> Weight { - (144_208_000 as Weight) - .saturating_add((394_625_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_rent_allowance(r: u32, ) -> Weight { - (135_320_000 as Weight) - .saturating_add((925_541_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_block_number(r: u32, ) -> Weight { - (145_849_000 as Weight) - .saturating_add((390_065_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_now(r: u32, ) -> Weight { - (146_363_000 as Weight) - .saturating_add((391_772_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_weight_to_fee(r: u32, ) -> Weight { - (129_872_000 as Weight) - .saturating_add((670_744_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } - fn 
seal_gas(r: u32, ) -> Weight { - (130_985_000 as Weight) - .saturating_add((198_427_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_input(r: u32, ) -> Weight { - (138_647_000 as Weight) - .saturating_add((8_363_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_input_per_kb(n: u32, ) -> Weight { - (149_418_000 as Weight) - .saturating_add((272_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_return(r: u32, ) -> Weight { - (129_116_000 as Weight) - .saturating_add((5_745_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_return_per_kb(n: u32, ) -> Weight { - (139_601_000 as Weight) - .saturating_add((680_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_terminate(r: u32, ) -> Weight { - (138_548_000 as Weight) - .saturating_add((355_473_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) - } - fn seal_restore_to(r: u32, ) -> Weight { - (239_880_000 as Weight) - .saturating_add((138_305_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes((4 as Weight).saturating_mul(r as Weight))) - } - fn seal_restore_to_per_delta(d: u32, ) -> Weight { - (40_572_000 as Weight) - .saturating_add((3_748_632_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) - } - fn seal_random(r: u32, ) -> Weight { - (148_156_000 as Weight) - .saturating_add((1_036_452_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } - fn seal_deposit_event(r: u32, ) -> Weight { - (176_039_000 as Weight) - .saturating_add((1_497_705_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_923_547_000 as Weight) - .saturating_add((783_354_000 as Weight).saturating_mul(t as Weight)) - .saturating_add((240_600_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) - .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) - } - fn seal_set_rent_allowance(r: u32, ) -> Weight { - (151_095_000 as Weight) - .saturating_add((1_104_696_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn seal_set_storage(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((14_975_467_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - 
.saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (2_465_724_000 as Weight) - .saturating_add((203_125_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn seal_clear_storage(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((5_254_595_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_get_storage(r: u32, ) -> Weight { - (60_303_000 as Weight) - .saturating_add((1_135_486_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (931_900_000 as Weight) - .saturating_add((144_572_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } - fn seal_transfer(r: u32, ) -> Weight { - (50_722_000 as Weight) - .saturating_add((6_701_164_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_call(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((10_589_747_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (11_223_388_000 as Weight) - .saturating_add((4_965_182_000 as Weight).saturating_mul(t as Weight)) - .saturating_add((50_603_000 as Weight).saturating_mul(i as Weight)) - .saturating_add((72_972_000 as Weight).saturating_mul(o as Weight)) - .saturating_add(T::DbWeight::get().reads(105 as Weight)) - .saturating_add(T::DbWeight::get().reads((101 as Weight).saturating_mul(t as Weight))) - .saturating_add(T::DbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) - } - fn seal_instantiate(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((22_933_938_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((200 as Weight).saturating_mul(r as Weight))) - } - fn seal_instantiate_per_input_output_kb(i: u32, o: u32, ) -> Weight { - (20_986_307_000 as Weight) - .saturating_add((152_611_000 as Weight).saturating_mul(i as Weight)) - .saturating_add((73_457_000 as Weight).saturating_mul(o as Weight)) - .saturating_add(T::DbWeight::get().reads(207 as Weight)) - .saturating_add(T::DbWeight::get().writes(202 as Weight)) - } - fn seal_hash_sha2_256(r: u32, ) 
-> Weight { - (145_988_000 as Weight) - .saturating_add((343_540_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (719_758_000 as Weight) - .saturating_add((420_306_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_keccak_256(r: u32, ) -> Weight { - (116_261_000 as Weight) - .saturating_add((360_601_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (583_726_000 as Weight) - .saturating_add((333_091_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_256(r: u32, ) -> Weight { - (144_609_000 as Weight) - .saturating_add((332_388_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (612_987_000 as Weight) - .saturating_add((150_030_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_128(r: u32, ) -> Weight { - (142_085_000 as Weight) - .saturating_add((329_426_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (632_517_000 as Weight) - .saturating_add((149_974_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } -} diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index bc1f07645eed9..95bc8abef6fc9 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -13,38 +13,38 @@ publish = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } -sc-service = { version = "0.8.0", features = ["test-helpers", "db"], path = "../../../client/service" } -sc-client-db = { version = "0.8.0", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } -sc-client-api = { version = "2.0.0", path = "../../../client/api/" } -codec = { package = "parity-scale-codec", version = "1.3.4" } +pallet-balances = { version = "3.0.0", path = "../../../frame/balances" } +sc-service = { version = "0.9.0", features = ["test-helpers", "db"], path = "../../../client/service" } +sc-client-db = { version = "0.9.0", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } +sc-client-api = { version = "3.0.0", path = "../../../client/api/" } +codec = { package = "parity-scale-codec", version = "2.0.0" } pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0", path = "../../../frame/grandpa" } -pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } +pallet-grandpa = { version = "3.0.0", path = "../../../frame/grandpa" } +pallet-indices = { version = "3.0.0", path = "../../../frame/indices" } +sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } node-executor = { version = "2.0.0", path = "../executor" } node-primitives = { version = "2.0.0", path = "../primitives" } node-runtime = { version = "2.0.0", path = "../runtime" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-io = { version = "2.0.0", 
path = "../../../primitives/io" } -frame-support = { version = "2.0.0", path = "../../../frame/support" } -pallet-session = { version = "2.0.0", path = "../../../frame/session" } -pallet-society = { version = "2.0.0", path = "../../../frame/society" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -pallet-staking = { version = "2.0.0", path = "../../../frame/staking" } -sc-executor = { version = "0.8.0", path = "../../../client/executor", features = ["wasmtime"] } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -frame-system = { version = "2.0.0", path = "../../../frame/system" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-io = { version = "3.0.0", path = "../../../primitives/io" } +frame-support = { version = "3.0.0", path = "../../../frame/support" } +pallet-session = { version = "3.0.0", path = "../../../frame/session" } +pallet-society = { version = "3.0.0", path = "../../../frame/society" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +pallet-staking = { version = "3.0.0", path = "../../../frame/staking" } +sc-executor = { version = "0.9.0", path = "../../../client/executor", features = ["wasmtime"] } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +frame-system = { version = "3.0.0", path = "../../../frame/system" } substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } -pallet-timestamp = { version = "2.0.0", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0", path = "../../../frame/treasury" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../../primitives/timestamp" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +pallet-timestamp = { version = "3.0.0", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "3.0.0", path = "../../../frame/transaction-payment" } +pallet-treasury = { version = "3.0.0", path = "../../../frame/treasury" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sp-timestamp = { version = "3.0.0", default-features = false, path = "../../../primitives/timestamp" } +sp-block-builder = { version = "3.0.0", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.9.0", path = "../../../client/block-builder" } +sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } log = "0.4.8" tempfile = "3.1.0" fs_extra = "1" @@ -52,4 +52,4 @@ futures = "0.3.1" [dev-dependencies] criterion = "0.3.0" -sc-cli = { version = "0.8.0", path = "../../../client/cli" } +sc-cli = { version = "0.9.0", path = "../../../client/cli" } diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 153a52375c2a9..a6f65b86a0e27 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -172,7 +172,7 @@ impl Clone for BenchDb { .map(|f_result| f_result.expect("failed to read file in seed db") .path() - ).collect(); + ).collect::>(); fs_extra::copy_items( &seed_db_files, dir.path(), @@ -317,7 +317,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { BlockType::RandomTransfersKeepAlive => { Call::Balances( BalancesCall::transfer_keep_alive( - pallet_indices::address::Address::Id(receiver), + sp_runtime::MultiAddress::Id(receiver), node_runtime::ExistentialDeposit::get() + 1, ) ) @@ -325,7 +325,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { BlockType::RandomTransfersReaping => { Call::Balances( BalancesCall::transfer( - pallet_indices::address::Address::Id(receiver), + sp_runtime::MultiAddress::Id(receiver), // Transfer so that ending balance would be 1 less than existential deposit // so that we kill the sender account. 100*DOLLARS - (node_runtime::ExistentialDeposit::get() - 1), @@ -410,8 +410,10 @@ impl BenchDb { let db_config = sc_client_db::DatabaseSettings { state_cache_size: 16*1024*1024, state_cache_child_ratio: Some((0, 100)), - pruning: PruningMode::ArchiveAll, + state_pruning: PruningMode::ArchiveAll, source: database_type.into_settings(dir.into()), + keep_blocks: sc_client_db::KeepBlocks::All, + transaction_storage: sc_client_db::TransactionStorageMode::BlockBody, }; let task_executor = TaskExecutor::new(); @@ -591,7 +593,7 @@ impl BenchKeyring { } }).into(); UncheckedExtrinsic { - signature: Some((pallet_indices::address::Address::Id(signed), signature, extra)), + signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), function: payload.0, } } @@ -695,7 +697,6 @@ impl BenchContext { clear_justification_requests: false, needs_justification: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true, } ) diff --git a/bin/node/testing/src/client.rs b/bin/node/testing/src/client.rs index f44747b26b7a6..c4ace4ced9b42 100644 --- a/bin/node/testing/src/client.rs +++ b/bin/node/testing/src/client.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 6fa178ba4bcdd..75d0d18e6ef81 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/testing/src/keyring.rs b/bin/node/testing/src/keyring.rs index 3413748563633..da61040206ea4 100644 --- a/bin/node/testing/src/keyring.rs +++ b/bin/node/testing/src/keyring.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -94,7 +94,7 @@ pub fn sign(xt: CheckedExtrinsic, spec_version: u32, tx_version: u32, genesis_ha } }).into(); UncheckedExtrinsic { - signature: Some((pallet_indices::address::Address::Id(signed), signature, extra)), + signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), function: payload.0, } } diff --git a/bin/node/testing/src/lib.rs b/bin/node/testing/src/lib.rs index d682347e40019..c5792bccee80d 100644 --- a/bin/node/testing/src/lib.rs +++ b/bin/node/testing/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index a57dadd26bda8..3c60d654db944 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -14,10 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" -sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } -sc-chain-spec = { version = "2.0.0", path = "../../../client/chain-spec" } +sc-keystore = { version = "3.0.0", path = "../../../client/keystore" } +sc-chain-spec = { version = "3.0.0", path = "../../../client/chain-spec" } node-cli = { version = "2.0.0", path = "../../node/cli" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } rand = "0.7.2" structopt = "0.3.8" diff --git a/bin/utils/chain-spec-builder/build.rs b/bin/utils/chain-spec-builder/build.rs index 8d5aac1a08742..57424f016f3e5 100644 --- a/bin/utils/chain-spec-builder/build.rs +++ b/bin/utils/chain-spec-builder/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/utils/chain-spec-builder/src/main.rs b/bin/utils/chain-spec-builder/src/main.rs index c2db944050eb4..f3336b1d53a84 100644 --- a/bin/utils/chain-spec-builder/src/main.rs +++ b/bin/utils/chain-spec-builder/src/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index fa0b345bc8406..b0c71a4fc3327 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -8,21 +8,13 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" readme = "README.md" +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + [[bin]] path = "src/main.rs" name = "subkey" -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - [dependencies] -node-runtime = { version = "2.0.0", path = "../../node/runtime" } -node-primitives = { version = "2.0.0", path = "../../node/primitives" } -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -substrate-frame-cli = { version = "2.0.0", path = "../../../utils/frame/frame-utilities-cli" } +sc-cli = { version = "0.9.0", path = "../../../client/cli" } structopt = "0.3.14" -frame-system = { version = "2.0.0", path = "../../../frame/system" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } - -[features] -bench = [] diff --git a/bin/utils/subkey/src/lib.rs b/bin/utils/subkey/src/lib.rs index 051628e84a193..e7243fbd43e46 100644 --- a/bin/utils/subkey/src/lib.rs +++ b/bin/utils/subkey/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,11 +18,9 @@ use structopt::StructOpt; use sc_cli::{ - Error, VanityCmd, SignCmd, VerifyCmd, InsertCmd, - GenerateNodeKeyCmd, GenerateCmd, InspectKeyCmd, InspectNodeKeyCmd + Error, VanityCmd, SignCmd, VerifyCmd, GenerateNodeKeyCmd, GenerateCmd, InspectKeyCmd, + InspectNodeKeyCmd }; -use substrate_frame_cli::ModuleIdCmd; -use sp_core::crypto::Ss58Codec; #[derive(Debug, StructOpt)] #[structopt( @@ -44,12 +42,6 @@ pub enum Subkey { /// Print the peer ID corresponding to the node key in the given file InspectNodeKey(InspectNodeKeyCmd), - /// Insert a key to the keystore of a node. - Insert(InsertCmd), - - /// Inspect a module ID address - ModuleId(ModuleIdCmd), - /// Sign a message, with a given (secret) key. Sign(SignCmd), @@ -61,22 +53,14 @@ pub enum Subkey { } /// Run the subkey command, given the apropriate runtime. 
-pub fn run() -> Result<(), Error> - where - R: frame_system::Trait, - R::AccountId: Ss58Codec -{ +pub fn run() -> Result<(), Error> { match Subkey::from_args() { - Subkey::GenerateNodeKey(cmd) => cmd.run()?, - Subkey::Generate(cmd) => cmd.run()?, - Subkey::Inspect(cmd) => cmd.run()?, - Subkey::InspectNodeKey(cmd) => cmd.run()?, - Subkey::Insert(cmd) => cmd.run()?, - Subkey::ModuleId(cmd) => cmd.run::()?, - Subkey::Vanity(cmd) => cmd.run()?, - Subkey::Verify(cmd) => cmd.run()?, - Subkey::Sign(cmd) => cmd.run()?, - }; - - Ok(()) + Subkey::GenerateNodeKey(cmd) => cmd.run(), + Subkey::Generate(cmd) => cmd.run(), + Subkey::Inspect(cmd) => cmd.run(), + Subkey::InspectNodeKey(cmd) => cmd.run(), + Subkey::Vanity(cmd) => cmd.run(), + Subkey::Verify(cmd) => cmd.run(), + Subkey::Sign(cmd) => cmd.run(), + } } diff --git a/bin/utils/subkey/src/main.rs b/bin/utils/subkey/src/main.rs index dd14425130b7d..2a0f0850713fa 100644 --- a/bin/utils/subkey/src/main.rs +++ b/bin/utils/subkey/src/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,8 +18,6 @@ //! Subkey utility, based on node_runtime. -use node_runtime::Runtime; - fn main() -> Result<(), sc_cli::Error> { - subkey::run::() + subkey::run() } diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index d0fb5fc3ee0e2..637dae4a29abd 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client-api" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,38 +14,36 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } derive_more = "0.99.2" -sc-executor = { version = "0.8.0", path = "../executor" } -sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } +sc-executor = { version = "0.9.0", path = "../executor" } +sp-externalities = { version = "0.9.0", path = "../../primitives/externalities" } fnv = "1.0.6" futures = "0.3.1" hash-db = { version = "0.15.2", default-features = false } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -hex-literal = "0.3.1" -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -kvdb = "0.7.0" +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-inherents = { version = "3.0.0", default-features = false, path = "../../primitives/inherents" } +kvdb = "0.9.0" log = "0.4.8" -parking_lot = "0.10.0" +parking_lot = "0.11.1" lazy_static = "1.4.0" -sp-database = { version = "2.0.0", path = "../../primitives/database" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", default-features = false, path = "../../primitives/keystore" } -sp-std = { 
version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sp-trie = { version = "2.0.0", path = "../../primitives/trie" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0", path = "../../utils/prometheus" } +sp-database = { version = "3.0.0", path = "../../primitives/database" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-keystore = { version = "0.9.0", default-features = false, path = "../../primitives/keystore" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-version = { version = "3.0.0", default-features = false, path = "../../primitives/version" } +sp-api = { version = "3.0.0", path = "../../primitives/api" } +sp-utils = { version = "3.0.0", path = "../../primitives/utils" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } +sp-trie = { version = "3.0.0", path = "../../primitives/trie" } +sp-storage = { version = "3.0.0", path = "../../primitives/storage" } +sp-transaction-pool = { version = "3.0.0", path = "../../primitives/transaction-pool" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } [dev-dependencies] -kvdb-memorydb = "0.7.0" +kvdb-memorydb = "0.9.0" sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } +thiserror = "1.0.21" diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 47fec977f5e82..e41b250269a11 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,12 +21,12 @@ use std::sync::Arc; use std::collections::{HashMap, HashSet}; use sp_core::ChangesTrieConfigurationRange; -use sp_core::offchain::{OffchainStorage,storage::OffchainOverlayedChanges}; +use sp_core::offchain::OffchainStorage; use sp_runtime::{generic::BlockId, Justification, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor, HashFor}; use sp_state_machine::{ ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, + StorageCollection, ChildStorageCollection, OffchainChangesCollection, }; use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; use crate::{ @@ -174,7 +174,7 @@ pub trait BlockImportOperation { /// Write offchain storage changes to the database. 
fn update_offchain_storage( &mut self, - _offchain_update: OffchainOverlayedChanges, + _offchain_update: OffchainChangesCollection, ) -> sp_blockchain::Result<()> { Ok(()) } diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 86e3440f19c93..5f1e0134a5caf 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -28,7 +28,7 @@ use sp_state_machine::{ }; use sc_executor::{RuntimeVersion, NativeVersion}; use sp_externalities::Extensions; -use sp_core::{NativeOrEncoded,offchain::storage::OffchainOverlayedChanges}; +use sp_core::NativeOrEncoded; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; use crate::execution_extensions::ExecutionExtensions; @@ -86,7 +86,6 @@ pub trait CallExecutor { method: &str, call_data: &[u8], changes: &RefCell, - offchain_changes: &RefCell, storage_transaction_cache: Option<&RefCell< StorageTransactionCache>::State>, >>, diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 7fd7aa0dbcb7e..8fec00403bde1 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,7 +22,7 @@ //! One is generated for every `SIZE` blocks, allowing us to discard those blocks in //! favor of the trie root. When the "ancient" blocks need to be accessed, we simply //! request an inclusion proof of a specific block number against the trie with the -//! root has. A correct proof implies that the claimed block is identical to the one +//! root hash. A correct proof implies that the claimed block is identical to the one //! we discarded. use hash_db; diff --git a/client/api/src/client.rs b/client/api/src/client.rs index f97daa487638f..990a7908b62bb 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! A set of APIs supported by the client along with their primitives. 
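For context on the `BlockBackend` additions in the hunk below: a minimal, illustrative sketch (not part of this change) of how a caller generic over `BlockBackend` might use the new `extrinsic`/`have_extrinsic` accessors. The helper name and the logging are assumptions for illustration only.

    use sc_client_api::BlockBackend;
    use sp_runtime::traits::Block as BlockT;

    /// Report whether the extrinsic with the given hash is known to the client.
    fn log_extrinsic_presence<Block, Client>(
        client: &Client,
        hash: &Block::Hash,
    ) -> sp_blockchain::Result<()>
    where
        Block: BlockT,
        Client: BlockBackend<Block>,
    {
        // `have_extrinsic` has a default implementation in terms of `extrinsic`,
        // so backends only need to provide the latter.
        if client.have_extrinsic(hash)? {
            log::info!("extrinsic {:?} is available", hash);
        } else {
            log::info!("extrinsic {:?} is not indexed by this backend", hash);
        }
        Ok(())
    }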
@@ -93,6 +95,17 @@ pub trait BlockBackend { /// Get block hash by number. fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result>; + + /// Get single extrinsic by hash. + fn extrinsic( + &self, + hash: &Block::Hash, + ) -> sp_blockchain::Result::Extrinsic>>; + + /// Check if extrinsic exists. + fn have_extrinsic(&self, hash: &Block::Hash) -> sp_blockchain::Result { + Ok(self.extrinsic(hash)?.is_some()) + } } /// Provide a list of potential uncle headers for a given block. diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index 4fdd897b21579..1b13f2c6cffd2 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Execution extensions for runtime calls. //! @@ -136,31 +138,9 @@ impl ExecutionExtensions { *self.transaction_pool.write() = Some(Arc::downgrade(&pool) as _); } - /// Create `ExecutionManager` and `Extensions` for given offchain call. - /// /// Based on the execution context and capabilities it produces - /// the right manager and extensions object to support desired set of APIs. - pub fn manager_and_extensions( - &self, - at: &BlockId, - context: ExecutionContext, - ) -> ( - ExecutionManager>, - Extensions, - ) { - let manager = match context { - ExecutionContext::BlockConstruction => - self.strategies.block_construction.get_manager(), - ExecutionContext::Syncing => - self.strategies.syncing.get_manager(), - ExecutionContext::Importing => - self.strategies.importing.get_manager(), - ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.has_all() => - self.strategies.offchain_worker.get_manager(), - ExecutionContext::OffchainCall(_) => - self.strategies.other.get_manager(), - }; - + /// the extensions object to support desired set of APIs. + pub fn extensions(&self, at: &BlockId, context: ExecutionContext) -> Extensions { let capabilities = context.capabilities(); let mut extensions = self.extensions_factory.read().extensions_for(capabilities); @@ -190,7 +170,35 @@ impl ExecutionExtensions { ); } - (manager, extensions) + extensions + } + + /// Create `ExecutionManager` and `Extensions` for given offchain call. + /// + /// Based on the execution context and capabilities it produces + /// the right manager and extensions object to support desired set of APIs. 
+ pub fn manager_and_extensions( + &self, + at: &BlockId, + context: ExecutionContext, + ) -> ( + ExecutionManager>, + Extensions, + ) { + let manager = match context { + ExecutionContext::BlockConstruction => + self.strategies.block_construction.get_manager(), + ExecutionContext::Syncing => + self.strategies.syncing.get_manager(), + ExecutionContext::Importing => + self.strategies.importing.get_manager(), + ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.has_all() => + self.strategies.offchain_worker.get_manager(), + ExecutionContext::OffchainCall(_) => + self.strategies.other.get_manager(), + }; + + (manager, self.extensions(at, context)) } } @@ -205,7 +213,7 @@ impl offchain::TransactionPool for TransactionPoolAdapter< let xt = match Block::Extrinsic::decode(&mut &*data) { Ok(xt) => xt, Err(e) => { - log::warn!("Unable to decode extrinsic: {:?}: {}", data, e.what()); + log::warn!("Unable to decode extrinsic: {:?}: {}", data, e); return Err(()); }, }; diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index ded030fb8046f..c108acc7b43b9 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -386,6 +386,13 @@ impl blockchain::Backend for Blockchain { fn children(&self, _parent_hash: Block::Hash) -> sp_blockchain::Result> { unimplemented!() } + + fn extrinsic( + &self, + _hash: &Block::Hash, + ) -> sp_blockchain::Result::Extrinsic>> { + unimplemented!("Not supported by the in-mem backend.") + } } impl blockchain::ProvideCache for Blockchain { diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index d10fa7ac0e565..1971012c6aabc 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 677066936330e..0f860b95e7805 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. 
If not, see . //! Substrate client interfaces. #![warn(missing_docs)] diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 144851dac0075..a068e2d4a3417 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Substrate light client interfaces @@ -312,13 +314,21 @@ pub mod tests { use sp_test_primitives::{Block, Header, Extrinsic}; use super::*; + #[derive(Debug, thiserror::Error)] + #[error("Not implemented on test node")] + struct MockError; + + impl Into for MockError { + fn into(self) -> ClientError { + ClientError::Application(Box::new(self)) + } + } + pub type OkCallFetcher = Mutex>; - fn not_implemented_in_tests() -> Ready> - where - E: std::convert::From<&'static str>, + fn not_implemented_in_tests() -> Ready> { - futures::future::ready(Err("Not implemented on test node".into())) + futures::future::ready(Err(MockError.into())) } impl Fetcher for OkCallFetcher { diff --git a/client/api/src/notifications.rs b/client/api/src/notifications.rs index ec63c372c7e59..bfd419ec9a581 100644 --- a/client/api/src/notifications.rs +++ b/client/api/src/notifications.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 5749ae0576fc3..a0dbcf1d1e807 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index ff6c26bbee53e..9a1b8c8dab505 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-authority-discovery" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -14,34 +14,32 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.6.1" +prost-build = "0.7" [dependencies] async-trait = "0.1" -bytes = "0.5.0" -codec = { package = "parity-scale-codec", default-features = false, version = "1.3.4" } +codec = { package = "parity-scale-codec", default-features = false, version = "2.0.0" } derive_more = "0.99.2" either = "1.5.3" -futures = "0.3.4" +futures = "0.3.9" futures-timer = "3.0.1" -libp2p = { version = "0.29.1", default-features = false, features = ["kad"] } +libp2p = { version = "0.34.0", default-features = false, features = ["kad"] } log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} -prost = "0.6.1" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} +prost = "0.7" rand = "0.7.2" -sc-client-api = { version = "2.0.0", path = "../api" } -sc-keystore = { version = "2.0.0", path = "../keystore" } -sc-network = { version = "0.8.0", path = "../network" } +sc-client-api = { version = "3.0.0", path = "../api" } +sc-network = { version = "0.9.0", path = "../network" } serde_json = "1.0.41" -sp-authority-discovery = { version = "2.0.0", path = "../../primitives/authority-discovery" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } +sp-authority-discovery = { version = "3.0.0", path = "../../primitives/authority-discovery" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-keystore = { version = "0.9.0", path = "../../primitives/keystore" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-api = { version = "3.0.0", path = "../../primitives/api" } [dev-dependencies] -quickcheck = "0.9.0" -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -sc-peerset = { version = "2.0.0", path = "../peerset" } +quickcheck = "1.0.3" +sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } +sc-peerset = { version = "3.0.0", path = "../peerset" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client"} diff --git a/client/authority-discovery/src/error.rs b/client/authority-discovery/src/error.rs index 48bcdf33114b1..b271f7b9d62bb 100644 --- a/client/authority-discovery/src/error.rs +++ b/client/authority-discovery/src/error.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Authority discovery errors. @@ -31,13 +33,11 @@ pub enum Error { /// Failed to verify a dht payload with the given signature. VerifyingDhtPayload, /// Failed to hash the authority id to be used as a dht key. - HashingAuthorityId(libp2p::core::multiaddr::multihash::EncodeError), + HashingAuthorityId(libp2p::core::multiaddr::multihash::Error), /// Failed calling into the Substrate runtime. CallingRuntime(sp_blockchain::Error), /// Received a dht record with a key that does not match any in-flight awaited keys. ReceivingUnexpectedRecord, - /// Failed to set the authority discovery peerset priority group in the peerset module. - SettingPeersetPriorityGroup(String), /// Failed to encode a protobuf payload. EncodingProto(prost::EncodeError), /// Failed to decode a protobuf payload. diff --git a/client/authority-discovery/src/interval.rs b/client/authority-discovery/src/interval.rs new file mode 100644 index 0000000000000..0710487203d53 --- /dev/null +++ b/client/authority-discovery/src/interval.rs @@ -0,0 +1,64 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use futures::stream::Stream; +use futures::future::FutureExt; +use futures::ready; +use futures_timer::Delay; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; + +/// Exponentially increasing interval +/// +/// Doubles interval duration on each tick until the configured maximum is reached. +pub struct ExpIncInterval { + max: Duration, + next: Duration, + delay: Delay, +} + +impl ExpIncInterval { + /// Create a new [`ExpIncInterval`]. + pub fn new(start: Duration, max: Duration) -> Self { + let delay = Delay::new(start); + Self { + max, + next: start * 2, + delay, + } + } + + /// Fast forward the exponentially increasing interval to the configured maximum. 
+ pub fn set_to_max(&mut self) { + self.next = self.max; + self.delay = Delay::new(self.next); + } +} + +impl Stream for ExpIncInterval { + type Item = (); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(self.delay.poll_unpin(cx)); + self.delay = Delay::new(self.next); + self.next = std::cmp::min(self.max, self.next * 2); + + Poll::Ready(Some(())) + } +} diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 4ee57f31e04a5..26d4396ca8830 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . #![warn(missing_docs)] #![recursion_limit = "1024"] @@ -38,50 +40,41 @@ use sp_runtime::traits::Block as BlockT; use sp_api::ProvideRuntimeApi; mod error; +mod interval; mod service; +mod worker; + #[cfg(test)] mod tests; -mod worker; /// Configuration of [`Worker`]. pub struct WorkerConfig { - /// The interval in which the node will publish its own address on the DHT. - /// - /// By default this is set to 12 hours. - pub publish_interval: Duration, - /// The interval in which the node will query the DHT for new entries. + /// The maximum interval in which the node will publish its own address on the DHT. /// - /// By default this is set to 10 minutes. - pub query_interval: Duration, - /// The time the node will wait before triggering the first DHT query or publish. - /// - /// By default this is set to 30 seconds. - /// - /// This default is based on the rough boostrap time required by libp2p Kademlia. - pub query_start_delay: Duration, - /// The interval in which the worker will instruct the peerset to connect to a random subset - /// of discovered validators. + /// By default this is set to 1 hour. + pub max_publish_interval: Duration, + /// The maximum interval in which the node will query the DHT for new entries. /// /// By default this is set to 10 minutes. - pub priority_group_set_interval: Duration, - /// The time the worker will wait after each query interval tick to pass a subset of - /// the cached authority addresses down to the peerset. - /// - /// Be aware that the actual delay will be computed by [`Self::query_start_delay`] + - /// [`Self::priority_group_set_start_delay`] - /// - /// By default this is set to 5 minutes. 
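// --- Editor's note (illustrative sketch, not part of the diff) -------------
// The new `client/authority-discovery/src/interval.rs` above implements an
// exponentially increasing interval as a futures `Stream`. A minimal sketch of
// driving it, assuming `ExpIncInterval` is in scope; the loop body and the
// concrete durations are hypothetical, mirroring the worker changes further
// below (start at 2 seconds, cap at the configured maximum).
use futures::StreamExt;
use std::time::Duration;

async fn publish_loop() {
    let mut publish_interval = ExpIncInterval::new(
        Duration::from_secs(2),        // first tick after 2s, then 4s, 8s, ...
        Duration::from_secs(60 * 60),  // ... capped at 1h between ticks
    );

    while let Some(()) = publish_interval.next().await {
        // publish_ext_addresses().await;   // hypothetical work done per tick
        // publish_interval.set_to_max();   // e.g. once the first publish succeeded
    }
}
// ----------------------------------------------------------------------------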
- pub priority_group_set_offset: Duration, + pub max_query_interval: Duration, } impl Default for WorkerConfig { fn default() -> Self { Self { - publish_interval: Duration::from_secs(12 * 60 * 60), - query_interval: Duration::from_secs(10 * 60), - query_start_delay: Duration::from_secs(30), - priority_group_set_interval: Duration::from_secs(10 * 60), - priority_group_set_offset: Duration::from_secs(5 * 60), + // Kademlia's default time-to-live for Dht records is 36h, republishing records every + // 24h through libp2p-kad. Given that a node could restart at any point in time, one can + // not depend on the republishing process, thus publishing own external addresses should + // happen on an interval < 36h. + max_publish_interval: Duration::from_secs(1 * 60 * 60), + // External addresses of remote authorities can change at any given point in time. The + // interval on which to trigger new queries for the current and next authorities is a trade + // off between efficiency and performance. + // + // Querying 700 [`AuthorityId`]s takes ~8m on the Kusama DHT (16th Nov 2020) when + // comparing `authority_discovery_authority_addresses_requested_total` and + // `authority_discovery_dht_event_received`. + max_query_interval: Duration::from_secs(10 * 60), } } } diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index 7eabeb3daf52e..1da97cbb03b53 100644 --- a/client/authority-discovery/src/service.rs +++ b/client/authority-discovery/src/service.rs @@ -1,18 +1,20 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::ServicetoWorkerMsg; @@ -22,14 +24,14 @@ use futures::SinkExt; use sc_network::{Multiaddr, PeerId}; use sp_authority_discovery::AuthorityId; -/// Service to interact with the [`Worker`]. +/// Service to interact with the [`crate::Worker`]. #[derive(Clone)] pub struct Service { to_worker: mpsc::Sender, } -/// A [`Service`] allows to interact with a [`Worker`], e.g. by querying the -/// [`Worker`]'s local address cache for a given [`AuthorityId`]. +/// A [`Service`] allows to interact with a [`crate::Worker`], e.g. by querying the +/// [`crate::Worker`]'s local address cache for a given [`AuthorityId`]. impl Service { pub(crate) fn new(to_worker: mpsc::Sender) -> Self { Self { @@ -44,7 +46,7 @@ impl Service { /// [`crate::Worker`] failed. /// /// Note: [`Multiaddr`]s returned always include a [`PeerId`] via a - /// [`libp2p::core::multiaddr:Protocol::P2p`] component. 
Equality of + /// [`libp2p::core::multiaddr::Protocol::P2p`] component. Equality of /// [`PeerId`]s across [`Multiaddr`]s returned by a single call is not /// enforced today, given that there are still authorities out there /// publishing the addresses of their sentry nodes on the DHT. In the future diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 414ffc1e3f394..78e978e07a1a0 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 42ae3a5213f0f..e47f42a445ee9 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -1,35 +1,36 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . -use crate::{error::{Error, Result}, ServicetoWorkerMsg}; +use crate::{error::{Error, Result}, interval::ExpIncInterval, ServicetoWorkerMsg}; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; use std::marker::PhantomData; use std::sync::Arc; -use std::time::{Duration, Instant}; +use std::time::Duration; use futures::channel::mpsc; use futures::{FutureExt, Stream, StreamExt, stream::Fuse}; -use futures_timer::Delay; use addr_cache::AddrCache; use async_trait::async_trait; use codec::Decode; -use libp2p::{core::multiaddr, multihash::Multihash}; +use libp2p::{core::multiaddr, multihash::{Multihash, Hasher}}; use log::{debug, error, log_enabled}; use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; use prost::Message; @@ -54,14 +55,8 @@ mod schema { include!(concat!(env!("OUT_DIR"), "/authority_discovery.rs")); } #[cfg(test)] pub mod tests; -type Interval = Box + Unpin + Send + Sync>; - const LOG_TARGET: &'static str = "sub-authority-discovery"; -/// Name of the Substrate peerset priority group for authorities discovered through the authority -/// discovery module. -const AUTHORITIES_PRIORITY_GROUP_NAME: &'static str = "authorities"; - /// Maximum number of addresses cached per authority. Additional addresses are discarded. const MAX_ADDRESSES_PER_AUTHORITY: usize = 10; @@ -103,7 +98,7 @@ pub enum Role { /// /// 5. 
Allow querying of the collected addresses via the [`crate::Service`]. pub struct Worker { - /// Channel receiver for messages send by a [`Service`]. + /// Channel receiver for messages send by a [`crate::Service`]. from_service: Fuse>, client: Arc, @@ -113,12 +108,9 @@ pub struct Worker { dht_event_rx: DhtEventStream, /// Interval to be proactive, publishing own addresses. - publish_interval: Interval, + publish_interval: ExpIncInterval, /// Interval at which to request addresses of authorities, refilling the pending lookups queue. - query_interval: Interval, - /// Interval on which to set the peerset priority group to a new random - /// set of addresses. - priority_group_set_interval: Interval, + query_interval: ExpIncInterval, /// Queue of throttled lookups pending to be passed to the network. pending_lookups: Vec, @@ -153,31 +145,19 @@ where prometheus_registry: Option, config: crate::WorkerConfig, ) -> Self { - // Kademlia's default time-to-live for Dht records is 36h, republishing - // records every 24h through libp2p-kad. - // Given that a node could restart at any point in time, one can not depend on the - // republishing process, thus publishing own external addresses should happen on an interval - // < 36h. - let publish_interval = interval_at( - Instant::now() + config.query_start_delay, - config.publish_interval, + // When a node starts up publishing and querying might fail due to various reasons, for + // example due to being not yet fully bootstrapped on the DHT. Thus one should retry rather + // sooner than later. On the other hand, a long running node is likely well connected and + // thus timely retries are not needed. For this reasoning use an exponentially increasing + // interval for `publish_interval`, `query_interval` and `priority_group_set_interval` + // instead of a constant interval. + let publish_interval = ExpIncInterval::new( + Duration::from_secs(2), + config.max_publish_interval, ); - - // External addresses of remote authorities can change at any given point in time. The - // interval on which to trigger new queries for the current authorities is a trade off - // between efficiency and performance. - let query_interval_start = Instant::now() + config.query_start_delay; - let query_interval_duration = config.query_interval; - let query_interval = interval_at(query_interval_start, query_interval_duration); - - // Querying 500 [`AuthorityId`]s takes ~1m on the Kusama DHT (10th of August 2020) when - // comparing `authority_discovery_authority_addresses_requested_total` and - // `authority_discovery_dht_event_received`. With that in mind set the peerset priority - // group on the same interval as the [`query_interval`] above, - // just delayed by 5 minutes by default. - let priority_group_set_interval = interval_at( - query_interval_start + config.priority_group_set_offset, - config.priority_group_set_interval, + let query_interval = ExpIncInterval::new( + Duration::from_secs(2), + config.max_query_interval, ); let addr_cache = AddrCache::new(); @@ -202,7 +182,6 @@ where dht_event_rx, publish_interval, query_interval, - priority_group_set_interval, pending_lookups: Vec::new(), in_flight_lookups: HashMap::new(), addr_cache, @@ -232,15 +211,6 @@ where msg = self.from_service.select_next_some() => { self.process_message_from_service(msg); }, - // Set peerset priority group to a new random set of addresses. 
- _ = self.priority_group_set_interval.next().fuse() => { - if let Err(e) = self.set_priority_group().await { - error!( - target: LOG_TARGET, - "Failed to set priority group: {:?}", e, - ); - } - }, // Publish own addresses. _ = self.publish_interval.next().fuse() => { if let Err(e) = self.publish_ext_addresses().await { @@ -413,7 +383,7 @@ where } if log_enabled!(log::Level::Debug) { - let hashes = v.iter().map(|(hash, _value)| hash.clone()); + let hashes: Vec<_> = v.iter().map(|(hash, _value)| hash.clone()).collect(); debug!( target: LOG_TARGET, "Value for hash '{:?}' found on Dht.", hashes, @@ -449,6 +419,11 @@ where } }, DhtEvent::ValuePut(hash) => { + // Fast forward the exponentially increasing interval to the configured maximum. In + // case this was the first successful address publishing there is no need for a + // timely retry. + self.publish_interval.set_to_max(); + if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_put"]).inc(); } @@ -583,52 +558,13 @@ where Ok(intersection) } - - /// Set the peer set 'authority' priority group to a new random set of - /// [`Multiaddr`]s. - async fn set_priority_group(&self) -> Result<()> { - let addresses = self.addr_cache.get_random_subset(); - - if addresses.is_empty() { - debug!( - target: LOG_TARGET, - "Got no addresses in cache for peerset priority group.", - ); - return Ok(()); - } - - if let Some(metrics) = &self.metrics { - metrics.priority_group_size.set(addresses.len().try_into().unwrap_or(std::u64::MAX)); - } - - debug!( - target: LOG_TARGET, - "Applying priority group {:?} to peerset.", addresses, - ); - - self.network - .set_priority_group( - AUTHORITIES_PRIORITY_GROUP_NAME.to_string(), - addresses.into_iter().collect(), - ).await - .map_err(Error::SettingPeersetPriorityGroup)?; - - Ok(()) - } } /// NetworkProvider provides [`Worker`] with all necessary hooks into the -/// underlying Substrate networking. Using this trait abstraction instead of [`NetworkService`] -/// directly is necessary to unit test [`Worker`]. +/// underlying Substrate networking. Using this trait abstraction instead of +/// [`sc_network::NetworkService`] directly is necessary to unit test [`Worker`]. #[async_trait] pub trait NetworkProvider: NetworkStateInfo { - /// Modify a peerset priority group. - async fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String>; - /// Start putting a value in the Dht. fn put_value(&self, key: libp2p::kad::record::Key, value: Vec); @@ -642,13 +578,6 @@ where B: BlockT + 'static, H: ExHashT, { - async fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String> { - self.set_priority_group(group_id, peers).await - } fn put_value(&self, key: libp2p::kad::record::Key, value: Vec) { self.put_value(key, value) } @@ -661,16 +590,6 @@ fn hash_authority_id(id: &[u8]) -> libp2p::kad::record::Key { libp2p::kad::record::Key::new(&libp2p::multihash::Sha2_256::digest(id)) } -fn interval_at(start: Instant, duration: Duration) -> Interval { - let stream = futures::stream::unfold(start, move |next| { - let time_until_next = next.saturating_duration_since(Instant::now()); - - Delay::new(time_until_next).map(move |_| Some(((), next + duration))) - }); - - Box::new(stream) -} - /// Prometheus metrics for a [`Worker`]. 
#[derive(Clone)] pub(crate) struct Metrics { @@ -681,7 +600,6 @@ pub(crate) struct Metrics { dht_event_received: CounterVec, handle_value_found_event_failure: Counter, known_authorities_count: Gauge, - priority_group_size: Gauge, } impl Metrics { @@ -741,13 +659,6 @@ impl Metrics { )?, registry, )?, - priority_group_size: register( - Gauge::new( - "authority_discovery_priority_group_size", - "Number of addresses passed to the peer set as a priority group." - )?, - registry, - )?, }) } } diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index a2cd3f33e9215..13b259fbbb10d 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -1,31 +1,27 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use libp2p::core::multiaddr::{Multiaddr, Protocol}; -use rand::seq::SliceRandom; use std::collections::HashMap; use sp_authority_discovery::AuthorityId; use sc_network::PeerId; -/// The maximum number of authority connections initialized through the authority discovery module. -/// -/// In other words the maximum size of the `authority` peerset priority group. -const MAX_NUM_AUTHORITY_CONN: usize = 10; - /// Cache for [`AuthorityId`] -> [`Vec`] and [`PeerId`] -> [`AuthorityId`] mappings. pub(super) struct AddrCache { authority_id_to_addresses: HashMap>, @@ -75,30 +71,6 @@ impl AddrCache { self.peer_id_to_authority_id.get(peer_id) } - /// Returns a single address for a random subset (maximum of [`MAX_NUM_AUTHORITY_CONN`]) of all - /// known authorities. - pub fn get_random_subset(&self) -> Vec { - let mut rng = rand::thread_rng(); - - let mut addresses = self - .authority_id_to_addresses - .iter() - .filter_map(|(_authority_id, addresses)| { - debug_assert!(!addresses.is_empty()); - addresses - .choose(&mut rng) - }) - .collect::>(); - - addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); - addresses.dedup(); - - addresses - .choose_multiple(&mut rng, MAX_NUM_AUTHORITY_CONN) - .map(|a| (**a).clone()) - .collect() - } - /// Removes all [`PeerId`]s and [`Multiaddr`]s from the cache that are not related to the given /// [`AuthorityId`]s. 
pub fn retain_ids(&mut self, authority_ids: &Vec) { @@ -139,9 +111,8 @@ fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option { mod tests { use super::*; - use libp2p::multihash; + use libp2p::multihash::{self, Multihash}; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; - use rand::Rng; use sp_authority_discovery::{AuthorityId, AuthorityPair}; use sp_core::crypto::Pair; @@ -150,8 +121,8 @@ mod tests { struct TestAuthorityId(AuthorityId); impl Arbitrary for TestAuthorityId { - fn arbitrary(g: &mut G) -> Self { - let seed: [u8; 32] = g.gen(); + fn arbitrary(g: &mut Gen) -> Self { + let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); TestAuthorityId(AuthorityPair::from_seed_slice(&seed).unwrap().public()) } } @@ -160,10 +131,10 @@ mod tests { struct TestMultiaddr(Multiaddr); impl Arbitrary for TestMultiaddr { - fn arbitrary(g: &mut G) -> Self { - let seed: [u8; 32] = g.gen(); + fn arbitrary(g: &mut Gen) -> Self { + let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); let peer_id = PeerId::from_multihash( - multihash::wrap(multihash::Code::Sha2_256, &seed) + Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap() ).unwrap(); let multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse::() .unwrap() @@ -190,11 +161,6 @@ mod tests { cache.insert(second.0.clone(), vec![second.1.clone()]); cache.insert(third.0.clone(), vec![third.1.clone()]); - let subset = cache.get_random_subset(); - assert!( - subset.contains(&first.1) && subset.contains(&second.1) && subset.contains(&third.1), - "Expect initial subset to contain all authorities.", - ); assert_eq!( Some(&vec![third.1.clone()]), cache.get_addresses_by_authority_id(&third.0), @@ -208,12 +174,6 @@ mod tests { cache.retain_ids(&vec![first.0, second.0]); - let subset = cache.get_random_subset(); - assert!( - subset.contains(&first.1) || subset.contains(&second.1), - "Expected both first and second authority." - ); - assert!(!subset.contains(&third.1), "Did not expect address from third authority"); assert_eq!( None, cache.get_addresses_by_authority_id(&third.0), "Expect `get_addresses_by_authority_id` to not return `None` for third authority." diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 12adb8f232514..20c4c937096a1 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,7 +18,7 @@ use crate::worker::schema; -use std::{iter::FromIterator, sync::{Arc, Mutex}, task::Poll}; +use std::{sync::{Arc, Mutex}, task::Poll}; use async_trait::async_trait; use futures::channel::mpsc::{self, channel}; @@ -37,66 +37,6 @@ use substrate_test_runtime_client::runtime::Block; use super::*; -#[test] -fn interval_at_with_start_now() { - let start = Instant::now(); - - let mut interval = interval_at( - std::time::Instant::now(), - std::time::Duration::from_secs(10), - ); - - futures::executor::block_on(async { - interval.next().await; - }); - - assert!( - Instant::now().saturating_duration_since(start) < Duration::from_secs(1), - "Expected low resolution instant interval to fire within less than a second.", - ); -} - -#[test] -fn interval_at_is_queuing_ticks() { - let start = Instant::now(); - - let interval = interval_at(start, std::time::Duration::from_millis(100)); - - // Let's wait for 200ms, thus 3 elements should be queued up (1st at 0ms, 2nd at 100ms, 3rd - // at 200ms). - std::thread::sleep(Duration::from_millis(200)); - - futures::executor::block_on(async { - interval.take(3).collect::>().await; - }); - - // Make sure we did not wait for more than 300 ms, which would imply that `at_interval` is - // not queuing ticks. - assert!( - Instant::now().saturating_duration_since(start) < Duration::from_millis(300), - "Expect interval to /queue/ events when not polled for a while.", - ); -} - -#[test] -fn interval_at_with_initial_delay() { - let start = Instant::now(); - - let mut interval = interval_at( - std::time::Instant::now() + Duration::from_millis(100), - std::time::Duration::from_secs(10), - ); - - futures::executor::block_on(async { - interval.next().await; - }); - - assert!( - Instant::now().saturating_duration_since(start) > Duration::from_millis(100), - "Expected interval with initial delay not to fire right away.", - ); -} - #[derive(Clone)] pub(crate) struct TestApi { pub(crate) authorities: Vec, @@ -172,10 +112,6 @@ sp_api::mock_impl_runtime_apis! { pub enum TestNetworkEvent { GetCalled(kad::record::Key), PutCalled(kad::record::Key, Vec), - SetPriorityGroupCalled { - group_id: String, - peers: HashSet - }, } pub struct TestNetwork { @@ -185,7 +121,6 @@ pub struct TestNetwork { // vectors below. 
pub put_value_call: Arc)>>>, pub get_value_call: Arc>>, - pub set_priority_group_call: Arc)>>>, event_sender: mpsc::UnboundedSender, event_receiver: Option>, } @@ -207,7 +142,6 @@ impl Default for TestNetwork { ], put_value_call: Default::default(), get_value_call: Default::default(), - set_priority_group_call: Default::default(), event_sender: tx, event_receiver: Some(rx), } @@ -216,21 +150,6 @@ impl Default for TestNetwork { #[async_trait] impl NetworkProvider for TestNetwork { - async fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String> { - self.set_priority_group_call - .lock() - .unwrap() - .push((group_id.clone(), peers.clone())); - self.event_sender.clone().unbounded_send(TestNetworkEvent::SetPriorityGroupCalled { - group_id, - peers, - }).unwrap(); - Ok(()) - } fn put_value(&self, key: kad::record::Key, value: Vec) { self.put_value_call.lock().unwrap().push((key.clone(), value.clone())); self.event_sender.clone().unbounded_send(TestNetworkEvent::PutCalled(key, value)).unwrap(); @@ -356,14 +275,6 @@ fn publish_discover_cycle() { let (_dht_event_tx, dht_event_rx) = channel(1000); let network: Arc = Arc::new(Default::default()); - let node_a_multiaddr = { - let peer_id = network.local_peer_id(); - let address = network.external_addresses().pop().unwrap(); - - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) - }; let key_store = KeyStore::new(); @@ -425,19 +336,6 @@ fn publish_discover_cycle() { // Make authority discovery handle the event. worker.handle_dht_event(dht_event).await; - - worker.set_priority_group().await.unwrap(); - - // Expect authority discovery to set the priority set. - assert_eq!(network.set_priority_group_call.lock().unwrap().len(), 1); - - assert_eq!( - network.set_priority_group_call.lock().unwrap()[0], - ( - "authorities".to_string(), - HashSet::from_iter(vec![node_a_multiaddr.clone()].into_iter()) - ) - ); }.boxed_local().into()); pool.run(); diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 1b1d8921bcfb3..2047c85b0c872 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-basic-authorship" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,25 +13,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } -futures = "0.3.4" +codec = { package = "parity-scale-codec", version = "2.0.0" } +futures = "0.3.9" futures-timer = "3.0.1" log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sc-block-builder = { version = "0.8.0", path = "../block-builder" } -sc-proposer-metrics = { version = "0.8.0", 
path = "../proposer-metrics" } -tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} +sp-api = { version = "3.0.0", path = "../../primitives/api" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sc-client-api = { version = "3.0.0", path = "../api" } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } +sp-inherents = { version = "3.0.0", path = "../../primitives/inherents" } +sc-telemetry = { version = "3.0.0", path = "../telemetry" } +sp-transaction-pool = { version = "3.0.0", path = "../../primitives/transaction-pool" } +sc-block-builder = { version = "0.9.0", path = "../block-builder" } +sc-proposer-metrics = { version = "0.9.0", path = "../proposer-metrics" } [dev-dependencies] -sc-transaction-pool = { version = "2.0.0", path = "../../client/transaction-pool" } +sc-transaction-pool = { version = "3.0.0", path = "../transaction-pool" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -parking_lot = "0.10.0" +parking_lot = "0.11.1" diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 89edfac0d4e98..73e6156615288 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -42,6 +42,15 @@ use std::marker::PhantomData; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_proposer_metrics::MetricsLink as PrometheusMetrics; +/// Default maximum block size in bytes used by [`Proposer`]. +/// +/// Can be overwritten by [`ProposerFactory::set_maximum_block_size`]. +/// +/// Be aware that there is also an upper packet size on what the networking code +/// will accept. If the block doesn't fit in such a package, it can not be +/// transferred to other nodes. +pub const DEFAULT_MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024 + 512; + /// Proposer factory. pub struct ProposerFactory { spawn_handle: Box, @@ -53,6 +62,7 @@ pub struct ProposerFactory { metrics: PrometheusMetrics, /// phantom member to pin the `Backend` type. _phantom: PhantomData, + max_block_size: usize, } impl ProposerFactory { @@ -68,8 +78,17 @@ impl ProposerFactory { transaction_pool, metrics: PrometheusMetrics::new(prometheus), _phantom: PhantomData, + max_block_size: DEFAULT_MAX_BLOCK_SIZE, } } + + /// Set the maximum block size in bytes. + /// + /// The default value for the maximum block size is: + /// [`DEFAULT_MAX_BLOCK_SIZE`]. 
+ pub fn set_maximum_block_size(&mut self, size: usize) { + self.max_block_size = size; + } } impl ProposerFactory @@ -103,6 +122,7 @@ impl ProposerFactory now, metrics: self.metrics.clone(), _phantom: PhantomData, + max_block_size: self.max_block_size, }; proposer @@ -143,6 +163,7 @@ pub struct Proposer { now: Box time::Instant + Send + Sync>, metrics: PrometheusMetrics, _phantom: PhantomData, + max_block_size: usize, } impl sp_consensus::Proposer for @@ -187,10 +208,7 @@ impl sp_consensus::Proposer for })); async move { - match rx.await { - Ok(x) => x, - Err(err) => Err(sp_blockchain::Error::Msg(err.to_string())) - } + rx.await? }.boxed() } } @@ -334,7 +352,12 @@ impl Proposer error!("Failed to verify block encoding/decoding"); } - if let Err(err) = evaluation::evaluate_initial(&block, &self.parent_hash, self.parent_number) { + if let Err(err) = evaluation::evaluate_initial( + &block, + &self.parent_hash, + self.parent_number, + self.max_block_size, + ) { error!("Failed to evaluate authored block: {:?}", err); } @@ -385,6 +408,7 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), + true.into(), None, spawner.clone(), client.clone(), @@ -443,6 +467,7 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), + true.into(), None, spawner.clone(), client.clone(), @@ -483,6 +508,7 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), + true.into(), None, spawner.clone(), client.clone(), @@ -550,6 +576,7 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), + true.into(), None, spawner.clone(), client.clone(), diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 9b0c491508231..224dccd36b533 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -34,6 +34,7 @@ //! # let spawner = sp_core::testing::TaskExecutor::new(); //! # let txpool = BasicPool::new_full( //! # Default::default(), +//! # true.into(), //! # None, //! # spawner.clone(), //! 
# client.clone(), @@ -71,4 +72,4 @@ mod basic_authorship; -pub use crate::basic_authorship::{ProposerFactory, Proposer}; +pub use crate::basic_authorship::{ProposerFactory, Proposer, DEFAULT_MAX_BLOCK_SIZE}; diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index 0c3d289bbcbc1..dda5edde36db5 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-block-builder" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-block-builder = { version = "2.0.0", path = "../../primitives/block-builder" } -sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } -sc-client-api = { version = "2.0.0", path = "../api" } -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } +sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-api = { version = "3.0.0", path = "../../primitives/api" } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-block-builder = { version = "3.0.0", path = "../../primitives/block-builder" } +sp-inherents = { version = "3.0.0", path = "../../primitives/inherents" } +sc-client-api = { version = "3.0.0", path = "../api" } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } [dev-dependencies] substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -sp-trie = { version = "2.0.0", path = "../../primitives/trie" } +sp-trie = { version = "3.0.0", path = "../../primitives/trie" } diff --git a/client/block-builder/README.md b/client/block-builder/README.md index c691f6692abff..b105d4203362f 100644 --- a/client/block-builder/README.md +++ b/client/block-builder/README.md @@ -1,7 +1,7 @@ Substrate block builder This crate provides the [`BlockBuilder`] utility and the corresponding runtime api -[`BlockBuilder`](sp_block_builder::BlockBuilder).Error +[`BlockBuilder`](https://docs.rs/sc-block-builder/latest/sc_block_builder/struct.BlockBuilder.html).Error The block builder utility is used in the node as an abstraction over the runtime api to initialize a block, to push extrinsics and to finalize a block. diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 904667b1afc6e..5a7e0277d9e8c 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,7 +19,7 @@ //! 
Substrate block builder //! //! This crate provides the [`BlockBuilder`] utility and the corresponding runtime api -//! [`BlockBuilder`](sp_block_builder::BlockBuilder).Error +//! [`BlockBuilder`](sp_block_builder::BlockBuilder). //! //! The block builder utility is used in the node as an abstraction over the runtime api to //! initialize a block, to push extrinsics and to finalize a block. @@ -212,7 +212,7 @@ where &state, changes_trie_state.as_ref(), parent_hash, - )?; + ).map_err(|e| sp_blockchain::Error::StorageChanges(e))?; Ok(BuiltBlock { block: ::new(header, self.extrinsics), diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 79f14058aad6d..27850cc8400b3 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-chain-spec" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-chain-spec-derive = { version = "2.0.0", path = "./derive" } -impl-trait-for-tuples = "0.1.3" -sc-network = { version = "0.8.0", path = "../network" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sc-chain-spec-derive = { version = "3.0.0", path = "./derive" } +impl-trait-for-tuples = "0.2.1" +sc-network = { version = "0.9.0", path = "../network" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-chain-spec = { version = "2.0.0", path = "../../primitives/chain-spec" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-consensus-babe = { version = "0.8.0-rc6", path = "../consensus/babe" } -sp-consensus-babe = { version = "0.8.0-rc6", path = "../../primitives/consensus/babe" } -sc-consensus-epochs = { version = "0.8.0-rc6", path = "../consensus/epochs" } -sc-finality-grandpa = { version = "0.8.0-rc6", path = "../finality-grandpa" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-chain-spec = { version = "3.0.0", path = "../../primitives/chain-spec" } +sc-telemetry = { version = "3.0.0", path = "../telemetry" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-consensus-babe = { version = "0.9.0", path = "../consensus/babe" } +sp-consensus-babe = { version = "0.9.0", path = "../../primitives/consensus/babe" } +sc-consensus-epochs = { version = "0.9.0", path = "../consensus/epochs" } +sc-finality-grandpa = { version = "0.9.0", path = "../finality-grandpa" } diff --git a/client/chain-spec/README.md b/client/chain-spec/README.md index 59a66aa5ace79..5525affbed81c 100644 --- a/client/chain-spec/README.md +++ b/client/chain-spec/README.md @@ -4,7 +4,7 @@ This crate contains structs and utilities to declare a runtime-specific configuration file (a.k.a chain spec). Basic chain spec type containing all required parameters is -[`ChainSpec`](https://docs.rs/sc-chain-spec/latest/sc_chain_spec/struct.ChainSpec.html). It can be extended with +[`ChainSpec`](https://docs.rs/sc-chain-spec/latest/sc_chain_spec/struct.GenericChainSpec.html). It can be extended with additional options that contain configuration specific to your chain. Usually the extension is going to be an amalgamate of types exposed by Substrate core modules. 
To allow the core modules to retrieve diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index 6826168a206a5..4f3484df31cba 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-chain-spec-derive" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -18,6 +18,6 @@ proc-macro = true proc-macro-crate = "0.1.4" proc-macro2 = "1.0.6" quote = "1.0.3" -syn = "1.0.7" +syn = "1.0.58" [dev-dependencies] diff --git a/client/chain-spec/derive/src/impls.rs b/client/chain-spec/derive/src/impls.rs index ded961a6da815..bb72270ed551a 100644 --- a/client/chain-spec/derive/src/impls.rs +++ b/client/chain-spec/derive/src/impls.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use proc_macro2::{Span, TokenStream}; use quote::quote; diff --git a/client/chain-spec/derive/src/lib.rs b/client/chain-spec/derive/src/lib.rs index 0dc053f7e301e..53f0c69491ecd 100644 --- a/client/chain-spec/derive/src/lib.rs +++ b/client/chain-spec/derive/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Macros to derive chain spec extension traits implementation. 
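The chain-spec README and the derive crate above describe extending the base spec with chain-specific options. A hypothetical sketch of such an extension type follows, kept self-contained with serde and serde_json (both already dependencies of sc-chain-spec in this diff); in a real node the struct would additionally derive the extension traits generated by sc-chain-spec-derive, and the field shown here is invented for illustration.

use serde::{Deserialize, Serialize};

// Hypothetical chain-specific options carried alongside the base chain spec.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct MyExtensions {
    /// Block hashes the node should refuse to import (invented example field).
    pub bad_blocks: Option<Vec<String>>,
}

fn main() {
    // The extension travels inside the chain spec JSON file:
    let json = r#"{ "badBlocks": ["0xdeadbeef"] }"#;
    let ext: MyExtensions = serde_json::from_str(json).unwrap();
    println!("{:?}", ext);
}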
diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 39c47e32908df..2faf95568290e 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/chain-spec/src/extension.rs b/client/chain-spec/src/extension.rs index c0338203eb10e..c0352529f8673 100644 --- a/client/chain-spec/src/extension.rs +++ b/client/chain-spec/src/extension.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Chain Spec extensions helpers. diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs index 94ed93758bb2d..ee4f757f8cf0e 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Substrate chain configurations. //! @@ -20,7 +22,7 @@ //! a runtime-specific configuration file (a.k.a chain spec). //! //! Basic chain spec type containing all required parameters is -//! [`ChainSpec`](./struct.ChainSpec.html). It can be extended with +//! [`GenericChainSpec`]. It can be extended with //! additional options that contain configuration specific to your chain. //! 
Usually the extension is going to be an amalgamate of types exposed //! by Substrate core modules. To allow the core modules to retrieve diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index b7e798a3ba1c1..03d23c5aec3e2 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-cli" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Substrate CLI interface." edition = "2018" @@ -14,44 +14,38 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.11" -atty = "0.2.13" -regex = "1.3.4" -ansi_term = "0.12.1" +regex = "1.4.2" tokio = { version = "0.2.21", features = [ "signal", "rt-core", "rt-threaded", "blocking" ] } -futures = "0.3.4" +futures = "0.3.9" fdlimit = "0.2.1" -libp2p = "0.29.1" -parity-scale-codec = "1.3.0" +libp2p = "0.34.0" +parity-scale-codec = "2.0.0" hex = "0.4.2" rand = "0.7.3" -bip39 = "0.6.0-beta.1" +tiny-bip39 = "0.8.0" serde_json = "1.0.41" -sc-keystore = { version = "2.0.0", path = "../keystore" } -sp-panic-handler = { version = "2.0.0", path = "../../primitives/panic-handler" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0", path = "../network" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } -sc-service = { version = "0.8.0", default-features = false, path = "../service" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } +sc-keystore = { version = "3.0.0", path = "../keystore" } +sp-panic-handler = { version = "3.0.0", path = "../../primitives/panic-handler" } +sc-client-api = { version = "3.0.0", path = "../api" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sc-network = { version = "0.9.0", path = "../network" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "3.0.0", path = "../../primitives/utils" } +sp-version = { version = "3.0.0", path = "../../primitives/version" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-keystore = { version = "0.9.0", path = "../../primitives/keystore" } +sc-service = { version = "0.9.0", default-features = false, path = "../service" } +sc-telemetry = { version = "3.0.0", path = "../telemetry" } +sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } names = "0.11.0" structopt = "0.3.8" -sc-tracing = { version = "2.0.0", path = "../tracing" } +sc-tracing = { version = "3.0.0", path = "../tracing" } chrono = "0.4.10" serde = "1.0.111" -tracing = "0.1.10" -tracing-log = "0.1.1" -tracing-subscriber = "0.2.10" -sc-cli-proc-macro = { version = "2.0.0", path = "./proc-macro" } thiserror = "1.0.21" [target.'cfg(not(target_os = "unknown"))'.dependencies] -rpassword = "4.0.1" +rpassword = "5.0.0" [dev-dependencies] tempfile = "3.1.0" diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index 85400f2a27759..2ebfa38925e23 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/commands/build_spec_cmd.rs b/client/cli/src/commands/build_spec_cmd.rs index 616c5139f64f0..3d66e752b81ec 100644 --- a/client/cli/src/commands/build_spec_cmd.rs +++ b/client/cli/src/commands/build_spec_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index b536d4f26bb6c..74e2d34f975b2 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index 497531ad393ba..55f05d9d7f303 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/commands/export_state_cmd.rs b/client/cli/src/commands/export_state_cmd.rs index c078db0d8aea9..2211b3131a013 100644 --- a/client/cli/src/commands/export_state_cmd.rs +++ b/client/cli/src/commands/export_state_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/commands/generate.rs b/client/cli/src/commands/generate.rs index 86b039ce6a4c5..08b5f20772363 100644 --- a/client/cli/src/commands/generate.rs +++ b/client/cli/src/commands/generate.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/client/cli/src/commands/generate_node_key.rs b/client/cli/src/commands/generate_node_key.rs index ad292e4712d84..ec22c6298adb6 100644 --- a/client/cli/src/commands/generate_node_key.rs +++ b/client/cli/src/commands/generate_node_key.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs index 00f8ec43b02fe..89f70d06813ce 100644 --- a/client/cli/src/commands/import_blocks_cmd.rs +++ b/client/cli/src/commands/import_blocks_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/commands/insert.rs b/client/cli/src/commands/insert.rs deleted file mode 100644 index fc307e45e7ce4..0000000000000 --- a/client/cli/src/commands/insert.rs +++ /dev/null @@ -1,94 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of the `insert` subcommand - -use crate::{Error, KeystoreParams, CryptoSchemeFlag, SharedParams, utils, with_crypto_scheme}; -use std::sync::Arc; -use structopt::StructOpt; -use sp_core::crypto::KeyTypeId; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use std::convert::TryFrom; -use sc_service::config::KeystoreConfig; -use sc_keystore::LocalKeystore; -use sp_core::crypto::SecretString; - -/// The `insert` command -#[derive(Debug, StructOpt)] -#[structopt( - name = "insert", - about = "Insert a key to the keystore of a node." -)] -pub struct InsertCmd { - /// The secret key URI. - /// If the value is a file, the file content is used as URI. - /// If not given, you will be prompted for the URI. - #[structopt(long)] - suri: Option, - - /// Key type, examples: "gran", or "imon" - #[structopt(long)] - key_type: String, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub keystore_params: KeystoreParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub crypto_scheme: CryptoSchemeFlag, -} - -impl InsertCmd { - /// Run the command - pub fn run(&self) -> Result<(), Error> { - let suri = utils::read_uri(self.suri.as_ref())?; - let base_path = self.shared_params.base_path.as_ref() - .ok_or_else(|| Error::MissingBasePath)?; - - let (keystore, public) = match self.keystore_params.keystore_config(base_path)? 
{ - KeystoreConfig::Path { path, password } => { - let public = with_crypto_scheme!( - self.crypto_scheme.scheme, - to_vec(&suri, password.clone()) - )?; - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(path, password)?); - (keystore, public) - }, - _ => unreachable!("keystore_config always returns path and password; qed") - }; - - let key_type = KeyTypeId::try_from(self.key_type.as_str()) - .map_err(|_e| { - Error::KeyTypeInvalid - })?; - - SyncCryptoStore::insert_unknown(&*keystore, key_type, &suri, &public[..]) - .map_err(|_| Error::KeyStoreOperation)?; - - Ok(()) - } -} - -fn to_vec(uri: &str, pass: Option) -> Result, Error> { - let p = utils::pair_from_suri::
<P>
(uri, pass)?; - Ok(p.public().as_ref().to_vec()) -} diff --git a/client/cli/src/commands/insert_key.rs b/client/cli/src/commands/insert_key.rs new file mode 100644 index 0000000000000..6e4324deed042 --- /dev/null +++ b/client/cli/src/commands/insert_key.rs @@ -0,0 +1,173 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `insert` subcommand + +use crate::{ + Error, KeystoreParams, CryptoSchemeFlag, SharedParams, utils, with_crypto_scheme, + SubstrateCli, +}; +use std::{sync::Arc, convert::TryFrom}; +use structopt::StructOpt; +use sp_core::{crypto::KeyTypeId, crypto::SecretString}; +use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; +use sc_keystore::LocalKeystore; +use sc_service::config::{KeystoreConfig, BasePath}; + +/// The `insert` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "insert", + about = "Insert a key to the keystore of a node." +)] +pub struct InsertKeyCmd { + /// The secret key URI. + /// If the value is a file, the file content is used as URI. + /// If not given, you will be prompted for the URI. + #[structopt(long)] + suri: Option, + + /// Key type, examples: "gran", or "imon" + #[structopt(long)] + key_type: String, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + +impl InsertKeyCmd { + /// Run the command + pub fn run(&self, cli: &C) -> Result<(), Error> { + let suri = utils::read_uri(self.suri.as_ref())?; + let base_path = self.shared_params + .base_path() + .unwrap_or_else(|| BasePath::from_project("", "", &C::executable_name())); + let chain_id = self.shared_params.chain_id(self.shared_params.is_dev()); + let chain_spec = cli.load_spec(&chain_id)?; + let config_dir = base_path.config_dir(chain_spec.id()); + + let (keystore, public) = match self.keystore_params.keystore_config(&config_dir)? { + (_, KeystoreConfig::Path { path, password }) => { + let public = with_crypto_scheme!( + self.crypto_scheme.scheme, + to_vec(&suri, password.clone()) + )?; + let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(path, password)?); + (keystore, public) + }, + _ => unreachable!("keystore_config always returns path and password; qed") + }; + + let key_type = KeyTypeId::try_from(self.key_type.as_str()).map_err(|_| Error::KeyTypeInvalid)?; + + SyncCryptoStore::insert_unknown(&*keystore, key_type, &suri, &public[..]) + .map_err(|_| Error::KeyStoreOperation)?; + + Ok(()) + } +} + +fn to_vec(uri: &str, pass: Option) -> Result, Error> { + let p = utils::pair_from_suri::
<P>
(uri, pass)?; + Ok(p.public().as_ref().to_vec()) +} + +#[cfg(test)] +mod tests { + use super::*; + use structopt::StructOpt; + use tempfile::TempDir; + use sp_core::{sr25519::Pair, Pair as _, Public}; + use sc_service::{ChainSpec, GenericChainSpec, ChainType, NoExtension}; + + struct Cli; + + impl SubstrateCli for Cli { + fn impl_name() -> String { + "test".into() + } + + fn impl_version() -> String { + "2.0".into() + } + + fn description() -> String { + "test".into() + } + + fn support_url() -> String { + "test.test".into() + } + + fn copyright_start_year() -> i32 { + 2021 + } + + fn author() -> String { + "test".into() + } + + fn native_runtime_version(_: &Box) -> &'static sp_version::RuntimeVersion { + unimplemented!("Not required in tests") + } + + fn load_spec(&self, _: &str) -> std::result::Result, String> { + Ok( + Box::new( + GenericChainSpec::from_genesis( + "test", + "test_id", + ChainType::Development, + || unimplemented!("Not required in tests"), + Vec::new(), + None, + None, + None, + NoExtension::None, + ), + ), + ) + } + } + + #[test] + fn insert_with_custom_base_path() { + let path = TempDir::new().unwrap(); + let path_str = format!("{}", path.path().display()); + let (key, uri, _) = Pair::generate_with_phrase(None); + + let inspect = InsertKeyCmd::from_iter( + &["insert-key", "-d", &path_str, "--key-type", "test", "--suri", &uri], + ); + assert!(inspect.run(&Cli).is_ok()); + + let keystore = LocalKeystore::open( + path.path().join("chains").join("test_id").join("keystore"), + None, + ).unwrap(); + assert!(keystore.has_keys(&[(key.public().to_raw_vec(), KeyTypeId(*b"test"))])); + } +} diff --git a/client/cli/src/commands/inspect_key.rs b/client/cli/src/commands/inspect_key.rs index fb3a7ef4f3b44..2642eee88adcd 100644 --- a/client/cli/src/commands/inspect_key.rs +++ b/client/cli/src/commands/inspect_key.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/client/cli/src/commands/inspect_node_key.rs b/client/cli/src/commands/inspect_node_key.rs index be0b88589d5e9..4db32aefb5fbb 100644 --- a/client/cli/src/commands/inspect_node_key.rs +++ b/client/cli/src/commands/inspect_node_key.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/client/cli/src/commands/key.rs b/client/cli/src/commands/key.rs index e5bce08145cb8..546454159718d 100644 --- a/client/cli/src/commands/key.rs +++ b/client/cli/src/commands/key.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,11 @@ //! Key related CLI utilities -use crate::Error; +use crate::{Error, SubstrateCli}; use structopt::StructOpt; use super::{ - insert::InsertCmd, + insert_key::InsertKeyCmd, inspect_key::InspectKeyCmd, generate::GenerateCmd, inspect_node_key::InspectNodeKeyCmd, @@ -45,17 +45,17 @@ pub enum KeySubcommand { InspectNodeKey(InspectNodeKeyCmd), /// Insert a key to the keystore of a node. 
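Both the removed `InsertCmd` and the new `InsertKeyCmd` reject key types that are not exactly four bytes, via `KeyTypeId::try_from`. A standalone sketch of that constraint follows, with a local stand-in type rather than `sp_core::crypto::KeyTypeId`.

use std::convert::{TryFrom, TryInto};

// Local stand-in mirroring the 4-byte key-type identifier used above.
#[derive(Debug, PartialEq)]
struct KeyTypeId([u8; 4]);

impl<'a> TryFrom<&'a str> for KeyTypeId {
    type Error = ();

    fn try_from(s: &'a str) -> Result<Self, ()> {
        // Anything that is not exactly four bytes is rejected, matching the
        // "must be a known 4-character sequence" error message in sc-cli.
        let bytes: [u8; 4] = s.as_bytes().try_into().map_err(|_| ())?;
        Ok(KeyTypeId(bytes))
    }
}

fn main() {
    assert!(KeyTypeId::try_from("gran").is_ok());      // e.g. GRANDPA keys
    assert!(KeyTypeId::try_from("toolong").is_err());  // rejected: not 4 bytes
    assert_eq!(KeyTypeId::try_from("imon").unwrap(), KeyTypeId(*b"imon"));
}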
- Insert(InsertCmd), + Insert(InsertKeyCmd), } impl KeySubcommand { /// run the key subcommands - pub fn run(&self) -> Result<(), Error> { + pub fn run(&self, cli: &C) -> Result<(), Error> { match self { KeySubcommand::GenerateNodeKey(cmd) => cmd.run(), KeySubcommand::Generate(cmd) => cmd.run(), KeySubcommand::InspectKey(cmd) => cmd.run(), - KeySubcommand::Insert(cmd) => cmd.run(), + KeySubcommand::Insert(cmd) => cmd.run(cli), KeySubcommand::InspectNodeKey(cmd) => cmd.run(), } } diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs index 9867f61cd277f..8c0d6acd6a511 100644 --- a/client/cli/src/commands/mod.rs +++ b/client/cli/src/commands/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -28,7 +28,7 @@ mod revert_cmd; mod run_cmd; mod generate_node_key; mod generate; -mod insert; +mod insert_key; mod inspect_node_key; mod inspect_key; mod key; @@ -43,7 +43,7 @@ pub use self::{ purge_chain_cmd::PurgeChainCmd, sign::SignCmd, generate::GenerateCmd, - insert::InsertCmd, + insert_key::InsertKeyCmd, inspect_key::InspectKeyCmd, generate_node_key::GenerateNodeKeyCmd, inspect_node_key::InspectNodeKeyCmd, diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs index 9c9c6e91fb241..1902d92e6345a 100644 --- a/client/cli/src/commands/purge_chain_cmd.rs +++ b/client/cli/src/commands/purge_chain_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs index b2e3c1bf8e2b6..2745ce2c65241 100644 --- a/client/cli/src/commands/revert_cmd.rs +++ b/client/cli/src/commands/revert_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 019b760e5b4ae..bbb8d6f68d7f9 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -75,7 +75,8 @@ pub struct RunCmd { /// Listen to all RPC interfaces. /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. + /// server to filter out dangerous methods. More details: + /// . /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. 
#[structopt(long = "rpc-external")] pub rpc_external: bool, @@ -105,7 +106,7 @@ pub struct RunCmd { /// Listen to all Websocket interfaces. /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. + /// server to filter out dangerous methods. More details: . /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. #[structopt(long = "ws-external")] pub ws_external: bool, @@ -142,7 +143,7 @@ pub struct RunCmd { /// /// A comma-separated list of origins (protocol://domain or special `null` /// value). Value of `all` will disable origin validation. Default is to - /// allow localhost and https://polkadot.js.org origins. When running in + /// allow localhost and origins. When running in /// --dev mode the default is to allow all origins. #[structopt(long = "rpc-cors", value_name = "ORIGINS", parse(try_from_str = parse_cors))] pub rpc_cors: Option, diff --git a/client/cli/src/commands/sign.rs b/client/cli/src/commands/sign.rs index 605fd5b12313f..a39e14697b995 100644 --- a/client/cli/src/commands/sign.rs +++ b/client/cli/src/commands/sign.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs index 6e48d04e1328b..1bbff392eca43 100644 --- a/client/cli/src/commands/utils.rs +++ b/client/cli/src/commands/utils.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -57,7 +57,7 @@ pub fn read_uri(uri: Option<&String>) -> error::Result { /// 2. Try to construct the `Pair` while using `uri` as input for [`sp_core::Pair::from_string_with_seed`]. /// /// 3. Try to construct the `Pair::Public` while using `uri` as input for -/// [`sp_core::Pair::Public::from_string_with_version`]. +/// [`sp_core::crypto::Ss58Codec::from_string_with_version`]. pub fn print_from_uri( uri: &str, password: Option, diff --git a/client/cli/src/commands/vanity.rs b/client/cli/src/commands/vanity.rs index 33b9025c13fbc..da47e8bb26cc8 100644 --- a/client/cli/src/commands/vanity.rs +++ b/client/cli/src/commands/vanity.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/commands/verify.rs b/client/cli/src/commands/verify.rs index 15abc04002f4c..f5bd5a06060c6 100644 --- a/client/cli/src/commands/verify.rs +++ b/client/cli/src/commands/verify.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
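The `--rpc-cors` help text above documents the parsing rule: the special value `all` disables origin validation, while any other value is treated as a comma-separated list of origins. The standalone sketch below mirrors that documented behaviour; it is not the actual `parse_cors` implementation referenced by the flag.

#[derive(Debug)]
enum Cors {
    // `all` disables origin validation entirely.
    All,
    // Otherwise only the listed origins are allowed.
    List(Vec<String>),
}

fn parse_cors(s: &str) -> Cors {
    if s == "all" {
        Cors::All
    } else {
        Cors::List(s.split(',').map(|origin| origin.trim().to_string()).collect())
    }
}

fn main() {
    println!("{:?}", parse_cors("all"));
    println!("{:?}", parse_cors("http://localhost:*,https://polkadot.js.org"));
}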
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index ab7a335c1ce64..247f6d2fddb33 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,7 +21,7 @@ use crate::arg_enums::Database; use crate::error::Result; use crate::{ - init_logger, DatabaseParams, ImportParams, KeystoreParams, NetworkParams, NodeKeyParams, + DatabaseParams, ImportParams, KeystoreParams, NetworkParams, NodeKeyParams, OffchainWorkerParams, PruningParams, SharedParams, SubstrateCli, }; use log::warn; @@ -32,7 +32,9 @@ use sc_service::config::{ NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode, Role, RpcMethods, TaskExecutor, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, }; -use sc_service::{ChainSpec, TracingReceiver}; +use sc_service::{ChainSpec, TracingReceiver, KeepBlocks, TransactionStorageMode}; +use sc_telemetry::{TelemetryHandle, TelemetrySpan}; +use sc_tracing::logging::LoggerBuilder; use std::net::SocketAddr; use std::path::PathBuf; @@ -47,7 +49,7 @@ const RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT: u64 = 10_000; /// Default configuration values used by Substrate /// -/// These values will be used by [`CliConfiguritation`] to set +/// These values will be used by [`CliConfiguration`] to set /// default values for e.g. the listen port or the RPC port. pub trait DefaultConfigurationValues { /// The port Substrate should listen on for p2p connections. @@ -186,12 +188,12 @@ pub trait CliConfiguration: Sized { /// Get the keystore configuration. /// - /// Bu default this is retrieved from `KeystoreParams` if it is available. Otherwise it uses + /// By default this is retrieved from `KeystoreParams` if it is available. Otherwise it uses /// `KeystoreConfig::InMemory`. - fn keystore_config(&self, base_path: &PathBuf) -> Result { + fn keystore_config(&self, config_dir: &PathBuf) -> Result<(Option, KeystoreConfig)> { self.keystore_params() - .map(|x| x.keystore_config(base_path)) - .unwrap_or(Ok(KeystoreConfig::InMemory)) + .map(|x| x.keystore_config(config_dir)) + .unwrap_or_else(|| Ok((None, KeystoreConfig::InMemory))) } /// Get the database cache size. @@ -203,6 +205,13 @@ pub trait CliConfiguration: Sized { .unwrap_or_default()) } + /// Get the database transaction storage scheme. + fn database_transaction_storage(&self) -> Result { + Ok(self.database_params() + .map(|x| x.transaction_storage()) + .unwrap_or(TransactionStorageMode::BlockBody)) + } + /// Get the database backend variant. /// /// By default this is retrieved from `DatabaseParams` if it is available. Otherwise its `None`. @@ -244,16 +253,26 @@ pub trait CliConfiguration: Sized { Ok(Default::default()) } - /// Get the pruning mode. + /// Get the state pruning mode. /// /// By default this is retrieved from `PruningMode` if it is available. Otherwise its /// `PruningMode::default()`. 
- fn pruning(&self, unsafe_pruning: bool, role: &Role) -> Result { + fn state_pruning(&self, unsafe_pruning: bool, role: &Role) -> Result { self.pruning_params() - .map(|x| x.pruning(unsafe_pruning, role)) + .map(|x| x.state_pruning(unsafe_pruning, role)) .unwrap_or_else(|| Ok(Default::default())) } + /// Get the block pruning mode. + /// + /// By default this is retrieved from `block_pruning` if it is available. Otherwise its + /// `KeepBlocks::All`. + fn keep_blocks(&self) -> Result { + self.pruning_params() + .map(|x| x.keep_blocks()) + .unwrap_or_else(|| Ok(KeepBlocks::All)) + } + /// Get the chain ID (string). /// /// By default this is retrieved from `SharedParams`. @@ -408,22 +427,18 @@ pub trait CliConfiguration: Sized { /// Get the tracing targets from the current object (if any) /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// By default this is retrieved from [`SharedParams`] if it is available. Otherwise its /// `None`. fn tracing_targets(&self) -> Result> { - Ok(self.import_params() - .map(|x| x.tracing_targets()) - .unwrap_or_else(|| Default::default())) + Ok(self.shared_params().tracing_targets()) } /// Get the TracingReceiver value from the current object /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// By default this is retrieved from [`SharedParams`] if it is available. Otherwise its /// `TracingReceiver::default()`. fn tracing_receiver(&self) -> Result { - Ok(self.import_params() - .map(|x| x.tracing_receiver()) - .unwrap_or_default()) + Ok(self.shared_params().tracing_receiver()) } /// Get the node key from the current object @@ -455,18 +470,15 @@ pub trait CliConfiguration: Sized { &self, cli: &C, task_executor: TaskExecutor, + telemetry_handle: Option, ) -> Result { let is_dev = self.is_dev()?; let chain_id = self.chain_id(is_dev)?; - let chain_spec = cli.load_spec(chain_id.as_str())?; + let chain_spec = cli.load_spec(&chain_id)?; let base_path = self .base_path()? .unwrap_or_else(|| BasePath::from_project("", "", &C::executable_name())); - let config_dir = base_path - .path() - .to_path_buf() - .join("chains") - .join(chain_spec.id()); + let config_dir = base_path.config_dir(chain_spec.id()); let net_config_dir = config_dir.join(DEFAULT_NETWORK_CONFIG_PATH); let client_id = C::client_id(); let database_cache_size = self.database_cache_size()?.unwrap_or(128); @@ -475,6 +487,14 @@ pub trait CliConfiguration: Sized { let role = self.role(is_dev)?; let max_runtime_instances = self.max_runtime_instances()?.unwrap_or(8); let is_validator = role.is_network_authority(); + let (keystore_remote, keystore) = self.keystore_config(&config_dir)?; + let telemetry_endpoints = telemetry_handle + .as_ref() + .and_then(|_| self.telemetry_endpoints(&chain_spec).transpose()) + .transpose()? 
+ // Don't initialise telemetry if `telemetry_endpoints` == Some([]) + .filter(|x| !x.is_empty()); + let telemetry_span = telemetry_endpoints.as_ref().map(|_| TelemetrySpan::new()); let unsafe_pruning = self .import_params() @@ -495,11 +515,14 @@ pub trait CliConfiguration: Sized { node_key, DCV::p2p_listen_port(), )?, - keystore: self.keystore_config(&config_dir)?, + keystore_remote, + keystore, database: self.database_config(&config_dir, database_cache_size, database)?, state_cache_size: self.state_cache_size()?, state_cache_child_ratio: self.state_cache_child_ratio()?, - pruning: self.pruning(unsafe_pruning, &role)?, + state_pruning: self.state_pruning(unsafe_pruning, &role)?, + keep_blocks: self.keep_blocks()?, + transaction_storage: self.database_transaction_storage()?, wasm_method: self.wasm_method()?, wasm_runtime_overrides: self.wasm_runtime_overrides(), execution_strategies: self.execution_strategies(is_dev, is_validator)?, @@ -510,7 +533,8 @@ pub trait CliConfiguration: Sized { rpc_ws_max_connections: self.rpc_ws_max_connections()?, rpc_cors: self.rpc_cors(is_dev)?, prometheus_config: self.prometheus_config(DCV::prometheus_listen_port())?, - telemetry_endpoints: self.telemetry_endpoints(&chain_spec)?, + telemetry_endpoints, + telemetry_span, telemetry_external_transport: self.telemetry_external_transport()?, default_heap_pages: self.default_heap_pages()?, offchain_worker: self.offchain_worker(&role)?, @@ -519,12 +543,14 @@ pub trait CliConfiguration: Sized { dev_key_seed: self.dev_key_seed(is_dev)?, tracing_targets: self.tracing_targets()?, tracing_receiver: self.tracing_receiver()?, + disable_log_reloading: self.is_log_filter_reloading_disabled()?, chain_spec, max_runtime_instances, announce_block: self.announce_block()?, role, base_path: Some(base_path), informant_output_format: Default::default(), + telemetry_handle, }) } @@ -538,6 +564,16 @@ pub trait CliConfiguration: Sized { Ok(self.shared_params().log_filters().join(",")) } + /// Is log reloading disabled (enabled by default) + fn is_log_filter_reloading_disabled(&self) -> Result { + Ok(self.shared_params().is_log_filter_reloading_disabled()) + } + + /// Should the log color output be disabled? + fn disable_log_color(&self) -> Result { + Ok(self.shared_params().disable_log_color()) + } + /// Initialize substrate. This must be done only once per process. /// /// This method: @@ -545,28 +581,38 @@ pub trait CliConfiguration: Sized { /// 1. Sets the panic handler /// 2. Initializes the logger /// 3. Raises the FD limit - fn init(&self) -> Result<()> { - let logger_pattern = self.log_filters()?; - let tracing_receiver = self.tracing_receiver()?; - let tracing_targets = self.tracing_targets()?; - + fn init(&self) -> Result { sp_panic_handler::set(&C::support_url(), &C::impl_version()); - if let Err(e) = init_logger(&logger_pattern, tracing_receiver, tracing_targets) { - log::warn!("💬 Problem initializing global logging framework: {:}", e) + let mut logger = LoggerBuilder::new(self.log_filters()?); + logger.with_log_reloading(!self.is_log_filter_reloading_disabled()?); + + if let Some(transport) = self.telemetry_external_transport()? { + logger.with_transport(transport); } + if let Some(tracing_targets) = self.tracing_targets()? { + let tracing_receiver = self.tracing_receiver()?; + logger.with_profiling(tracing_receiver, tracing_targets); + } + + if self.disable_log_color()? 
{ + logger.with_colors(false); + } + + let telemetry_worker = logger.init()?; + if let Some(new_limit) = fdlimit::raise_fd_limit() { if new_limit < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { warn!( "Low open file descriptor limit configured for the process. \ - Current value: {:?}, recommended value: {:?}.", + Current value: {:?}, recommended value: {:?}.", new_limit, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, ); } } - Ok(()) + Ok(telemetry_worker) } } diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index 36c963f3e8c97..c5784b2018172 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,35 +25,32 @@ pub type Result = std::result::Result; /// Error type for the CLI. #[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum Error { - /// Io error #[error(transparent)] Io(#[from] std::io::Error), - /// Cli error + #[error(transparent)] Cli(#[from] structopt::clap::Error), - /// Service error + #[error(transparent)] Service(#[from] sc_service::Error), - /// Client error + #[error(transparent)] Client(#[from] sp_blockchain::Error), - /// scale codec error + #[error(transparent)] Codec(#[from] parity_scale_codec::Error), - /// Input error + #[error("Invalid input: {0}")] Input(String), - /// Invalid listen multiaddress + #[error("Invalid listen multiaddress")] InvalidListenMultiaddress, - /// Application specific error chain sequence forwarder. - #[error(transparent)] - Application(#[from] Box), - /// URI error. + #[error("Invalid URI; expecting either a secret URI or a public URI.")] InvalidUri(crypto::PublicError), - /// Signature length mismatch. + #[error("Signature has an invalid length. Read {read} bytes, expected {expected} bytes")] SignatureInvalidLength { /// Amount of signature bytes read. @@ -61,28 +58,28 @@ pub enum Error { /// Expected number of signature bytes. expected: usize, }, - /// Missing base path argument. - #[error("The base path is missing, please provide one")] - MissingBasePath, - /// Unknown key type specifier or missing key type specifier. + #[error("Unknown key type, must be a known 4-character sequence")] KeyTypeInvalid, - /// Signature verification failed. + #[error("Signature verification failed")] SignatureInvalid, - /// Storing a given key failed. + #[error("Key store operation failed")] KeyStoreOperation, - /// An issue with the underlying key storage was encountered. + #[error("Key storage issue encountered")] KeyStorage(#[from] sc_keystore::Error), - /// Bytes are not decodable when interpreted as hexadecimal string. - #[error("Invalid hex base data")] + + #[error("Invalid hexadecimal string data")] HexDataConversion(#[from] hex::FromHexError), - /// Shortcut type to specify types on the fly, discouraged. - #[deprecated = "Use `Forwarded` with an error type instead."] - #[error("Other: {0}")] - Other(String), + + /// Application specific error chain sequence forwarder. 
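The rewritten `init()` above swaps the old `init_logger` call for the `LoggerBuilder` re-exported from sc-tracing. The sketch below strings together only the builder calls visible in this hunk (`new`, `with_log_reloading`, `with_colors`, `init`); the filter string and the wrapper function are assumptions for illustration, and the exact signatures are not verified against this revision.

fn init_logging() -> Result<(), sc_cli::Error> {
    // Directive syntax follows the log-filter string built from `log_filters` above.
    let mut logger = sc_cli::LoggerBuilder::new("info,libp2p=warn".to_string());
    logger.with_log_reloading(true);
    logger.with_colors(false);
    // `init` installs the global logger/tracing subscriber and hands back the
    // telemetry worker that the service later drives.
    let _telemetry_worker = logger.init()?;
    Ok(())
}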
+ #[error(transparent)] + Application(#[from] Box), + + #[error(transparent)] + GlobalLoggerError(#[from] sc_tracing::logging::Error), } impl std::convert::From<&str> for Error { @@ -93,7 +90,7 @@ impl std::convert::From<&str> for Error { impl std::convert::From for Error { fn from(s: String) -> Error { - Error::Input(s.to_string()) + Error::Input(s) } } diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index c25693dc418b1..602c53272ea59 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,13 +21,11 @@ #![warn(missing_docs)] #![warn(unused_extern_crates)] #![warn(unused_imports)] -#![warn(unused_crate_dependencies)] pub mod arg_enums; mod commands; mod config; mod error; -mod logging; mod params; mod runner; @@ -37,9 +35,10 @@ pub use config::*; pub use error::*; pub use params::*; pub use runner::*; -pub use sc_cli_proc_macro::*; pub use sc_service::{ChainSpec, Role}; use sc_service::{Configuration, TaskExecutor}; +use sc_telemetry::TelemetryHandle; +pub use sc_tracing::logging::LoggerBuilder; pub use sp_version::RuntimeVersion; use std::io::Write; pub use structopt; @@ -47,13 +46,6 @@ use structopt::{ clap::{self, AppSettings}, StructOpt, }; -#[doc(hidden)] -pub use tracing; -use tracing_subscriber::{ - filter::Directive, fmt::time::ChronoLocal, layer::SubscriberExt, FmtSubscriber, Layer, -}; - -pub use logging::PREFIX_LOG_SPAN; /// Substrate client CLI /// @@ -78,7 +70,8 @@ pub trait SubstrateCli: Sized { /// Extracts the file name from `std::env::current_exe()`. /// Resorts to the env var `CARGO_PKG_NAME` in case of Error. fn executable_name() -> String { - std::env::current_exe().ok() + std::env::current_exe() + .ok() .and_then(|e| e.file_name().map(|s| s.to_os_string())) .and_then(|w| w.into_string().ok()) .unwrap_or_else(|| env!("CARGO_PKG_NAME").into()) @@ -166,7 +159,7 @@ pub trait SubstrateCli: Sized { let _ = std::io::stdout().write_all(e.message.as_bytes()); std::process::exit(0); } - }, + } }; ::from_clap(&matches) @@ -221,243 +214,18 @@ pub trait SubstrateCli: Sized { &self, command: &T, task_executor: TaskExecutor, + telemetry_handle: Option, ) -> error::Result { - command.create_configuration(self, task_executor) + command.create_configuration(self, task_executor, telemetry_handle) } /// Create a runner for the command provided in argument. This will create a Configuration and /// a tokio runtime fn create_runner(&self, command: &T) -> error::Result> { - command.init::()?; - Runner::new(self, command) + let telemetry_worker = command.init::()?; + Runner::new(self, command, telemetry_worker) } /// Native runtime version. fn native_runtime_version(chain_spec: &Box) -> &'static RuntimeVersion; } - -/// Initialize the global logger -/// -/// This sets various global logging and tracing instances and thus may only be called once. 
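The reworked error enum above relies on two `thiserror` idioms: `#[error(transparent)]` forwards `Display` and `source` to the wrapped error, and `#[from]` generates the conversion that the `?` operator uses. A self-contained example of both, with generic types rather than the sc-cli error type:

use thiserror::Error;

#[derive(Debug, Error)]
enum MyError {
    // Display and source are delegated to the inner io::Error.
    #[error(transparent)]
    Io(#[from] std::io::Error),

    // A formatted message interpolating the payload.
    #[error("Invalid input: {0}")]
    Input(String),
}

fn read_config(path: &str) -> Result<String, MyError> {
    // The io::Error converts automatically through the generated `From` impl.
    Ok(std::fs::read_to_string(path)?)
}

fn main() {
    println!("{:?}", read_config("/nonexistent/path"));
}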
-pub fn init_logger( - pattern: &str, - tracing_receiver: sc_tracing::TracingReceiver, - profiling_targets: Option, -) -> std::result::Result<(), String> { - fn parse_directives(dirs: impl AsRef) -> Vec { - dirs.as_ref() - .split(',') - .filter_map(|s| s.parse().ok()) - .collect() - } - - if let Err(e) = tracing_log::LogTracer::init() { - return Err(format!( - "Registering Substrate logger failed: {:}!", e - )) - } - - let mut env_filter = tracing_subscriber::EnvFilter::default() - // Disable info logging by default for some modules. - .add_directive("ws=off".parse().expect("provided directive is valid")) - .add_directive("yamux=off".parse().expect("provided directive is valid")) - .add_directive("cranelift_codegen=off".parse().expect("provided directive is valid")) - // Set warn logging by default for some modules. - .add_directive("cranelift_wasm=warn".parse().expect("provided directive is valid")) - .add_directive("hyper=warn".parse().expect("provided directive is valid")) - // Enable info for others. - .add_directive(tracing_subscriber::filter::LevelFilter::INFO.into()); - - if let Ok(lvl) = std::env::var("RUST_LOG") { - if lvl != "" { - // We're not sure if log or tracing is available at this moment, so silently ignore the - // parse error. - for directive in parse_directives(lvl) { - env_filter = env_filter.add_directive(directive); - } - } - } - - if pattern != "" { - // We're not sure if log or tracing is available at this moment, so silently ignore the - // parse error. - for directive in parse_directives(pattern) { - env_filter = env_filter.add_directive(directive); - } - } - - // If we're only logging `INFO` entries then we'll use a simplified logging format. - let simple = match Layer::::max_level_hint(&env_filter) { - Some(level) if level <= tracing_subscriber::filter::LevelFilter::INFO => true, - _ => false, - }; - - // Always log the special target `sc_tracing`, overrides global level. - // NOTE: this must be done after we check the `max_level_hint` otherwise - // it is always raised to `TRACE`. 
- env_filter = env_filter.add_directive( - "sc_tracing=trace" - .parse() - .expect("provided directive is valid"), - ); - - // Make sure to include profiling targets in the filter - if let Some(profiling_targets) = profiling_targets.clone() { - for directive in parse_directives(profiling_targets) { - env_filter = env_filter.add_directive(directive); - } - } - - let isatty = atty::is(atty::Stream::Stderr); - let enable_color = isatty; - let timer = ChronoLocal::with_format(if simple { - "%Y-%m-%d %H:%M:%S".to_string() - } else { - "%Y-%m-%d %H:%M:%S%.3f".to_string() - }); - - let subscriber = FmtSubscriber::builder() - .with_env_filter(env_filter) - .with_writer(std::io::stderr) - .event_format(logging::EventFormat { - timer, - ansi: enable_color, - display_target: !simple, - display_level: !simple, - display_thread_name: !simple, - }) - .finish().with(logging::NodeNameLayer); - - if let Some(profiling_targets) = profiling_targets { - let profiling = sc_tracing::ProfilingLayer::new(tracing_receiver, &profiling_targets); - - if let Err(e) = tracing::subscriber::set_global_default(subscriber.with(profiling)) { - return Err(format!( - "Registering Substrate tracing subscriber failed: {:}!", e - )) - } - } else { - if let Err(e) = tracing::subscriber::set_global_default(subscriber) { - return Err(format!( - "Registering Substrate tracing subscriber failed: {:}!", e - )) - } - } - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate as sc_cli; - use std::{env, process::Command}; - use tracing::{metadata::Kind, subscriber::Interest, Callsite, Level, Metadata}; - - #[test] - fn test_logger_filters() { - let test_pattern = "afg=debug,sync=trace,client=warn,telemetry,something-with-dash=error"; - init_logger(&test_pattern, Default::default(), Default::default()).unwrap(); - - tracing::dispatcher::get_default(|dispatcher| { - let test_filter = |target, level| { - struct DummyCallSite; - impl Callsite for DummyCallSite { - fn set_interest(&self, _: Interest) {} - fn metadata(&self) -> &Metadata<'_> { - unreachable!(); - } - } - - let metadata = tracing::metadata!( - name: "", - target: target, - level: level, - fields: &[], - callsite: &DummyCallSite, - kind: Kind::SPAN, - ); - - dispatcher.enabled(&metadata) - }; - - assert!(test_filter("afg", Level::INFO)); - assert!(test_filter("afg", Level::DEBUG)); - assert!(!test_filter("afg", Level::TRACE)); - - assert!(test_filter("sync", Level::TRACE)); - assert!(test_filter("client", Level::WARN)); - - assert!(test_filter("telemetry", Level::TRACE)); - assert!(test_filter("something-with-dash", Level::ERROR)); - }); - } - - const EXPECTED_LOG_MESSAGE: &'static str = "yeah logging works as expected"; - - #[test] - fn dash_in_target_name_works() { - let executable = env::current_exe().unwrap(); - let output = Command::new(executable) - .env("ENABLE_LOGGING", "1") - .args(&["--nocapture", "log_something_with_dash_target_name"]) - .output() - .unwrap(); - - let output = String::from_utf8(output.stderr).unwrap(); - assert!(output.contains(EXPECTED_LOG_MESSAGE)); - } - - /// This is no actual test, it will be used by the `dash_in_target_name_works` test. - /// The given test will call the test executable to only execute this test that - /// will only print `EXPECTED_LOG_MESSAGE` through logging while using a target - /// name that contains a dash. This ensures that targets names with dashes work. 
- #[test] - fn log_something_with_dash_target_name() { - if env::var("ENABLE_LOGGING").is_ok() { - let test_pattern = "test-target=info"; - init_logger(&test_pattern, Default::default(), Default::default()).unwrap(); - - log::info!(target: "test-target", "{}", EXPECTED_LOG_MESSAGE); - } - } - - const EXPECTED_NODE_NAME: &'static str = "THE_NODE"; - - #[test] - fn prefix_in_log_lines() { - let re = regex::Regex::new(&format!( - r"^\d{{4}}-\d{{2}}-\d{{2}} \d{{2}}:\d{{2}}:\d{{2}} \[{}\] {}$", - EXPECTED_NODE_NAME, - EXPECTED_LOG_MESSAGE, - )).unwrap(); - let executable = env::current_exe().unwrap(); - let output = Command::new(executable) - .env("ENABLE_LOGGING", "1") - .args(&["--nocapture", "prefix_in_log_lines_entrypoint"]) - .output() - .unwrap(); - - let output = String::from_utf8(output.stderr).unwrap(); - assert!( - re.is_match(output.trim()), - format!("Expected:\n{}\nGot:\n{}", re, output), - ); - } - - /// This is no actual test, it will be used by the `prefix_in_log_lines` test. - /// The given test will call the test executable to only execute this test that - /// will only print a log line prefixed by the node name `EXPECTED_NODE_NAME`. - #[test] - fn prefix_in_log_lines_entrypoint() { - if env::var("ENABLE_LOGGING").is_ok() { - let test_pattern = "test-target=info"; - init_logger(&test_pattern, Default::default(), Default::default()).unwrap(); - prefix_in_log_lines_process(); - } - } - - #[crate::prefix_logs_with(EXPECTED_NODE_NAME)] - fn prefix_in_log_lines_process() { - log::info!("{}", EXPECTED_LOG_MESSAGE); - } -} diff --git a/client/cli/src/params/database_params.rs b/client/cli/src/params/database_params.rs index 24b23f6076a02..23d2adc07f9de 100644 --- a/client/cli/src/params/database_params.rs +++ b/client/cli/src/params/database_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,6 +18,7 @@ use crate::arg_enums::Database; use structopt::StructOpt; +use sc_service::TransactionStorageMode; /// Parameters for block import. #[derive(Debug, StructOpt)] @@ -34,6 +35,15 @@ pub struct DatabaseParams { /// Limit the memory the database cache can use. #[structopt(long = "db-cache", value_name = "MiB")] pub database_cache_size: Option, + + /// Enable storage chain mode + /// + /// This changes the storage format for blocks bodys. + /// If this is enabled, each transaction is stored separately in the + /// transaction database column and is only referenced by hash + /// in the block body column. + #[structopt(long)] + pub storage_chain: bool, } impl DatabaseParams { @@ -46,4 +56,13 @@ impl DatabaseParams { pub fn database_cache_size(&self) -> Option { self.database_cache_size } + + /// Transaction storage scheme. + pub fn transaction_storage(&self) -> TransactionStorageMode { + if self.storage_chain { + TransactionStorageMode::StorageChain + } else { + TransactionStorageMode::BlockBody + } + } } diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index 1efd4383432fb..7409dbf79dc0f 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
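The new `--storage-chain` flag above is a plain boolean switch that selects between two block-body storage modes. The sketch below shows the same flag-to-enum mapping with structopt, using a local stand-in enum rather than `sc_service::TransactionStorageMode`.

use structopt::StructOpt;

// Local stand-in for the two storage modes selected by the flag.
#[derive(Debug, Clone, Copy)]
enum StorageMode {
    BlockBody,
    StorageChain,
}

#[derive(Debug, StructOpt)]
struct DbParams {
    /// Enable storage chain mode.
    #[structopt(long)]
    storage_chain: bool,
}

impl DbParams {
    fn transaction_storage(&self) -> StorageMode {
        if self.storage_chain {
            StorageMode::StorageChain
        } else {
            StorageMode::BlockBody
        }
    }
}

fn main() {
    let params = DbParams::from_iter(&["node", "--storage-chain"]);
    println!("{:?}", params.transaction_storage());
}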
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::arg_enums::{ - ExecutionStrategy, TracingReceiver, WasmExecutionMethod, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, + ExecutionStrategy, WasmExecutionMethod, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, DEFAULT_EXECUTION_IMPORT_BLOCK, DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR, DEFAULT_EXECUTION_OFFCHAIN_WORKER, DEFAULT_EXECUTION_OTHER, DEFAULT_EXECUTION_SYNCING, }; @@ -73,32 +73,9 @@ pub struct ImportParams { default_value = "67108864" )] pub state_cache_size: usize, - - /// Comma separated list of targets for tracing. - #[structopt(long = "tracing-targets", value_name = "TARGETS")] - pub tracing_targets: Option, - - /// Receiver to process tracing messages. - #[structopt( - long = "tracing-receiver", - value_name = "RECEIVER", - possible_values = &TracingReceiver::variants(), - case_insensitive = true, - default_value = "Log" - )] - pub tracing_receiver: TracingReceiver, } impl ImportParams { - /// Receiver to process tracing messages. - pub fn tracing_receiver(&self) -> sc_service::TracingReceiver { - self.tracing_receiver.clone().into() - } - - /// Comma separated list of targets for tracing. - pub fn tracing_targets(&self) -> Option { - self.tracing_targets.clone() - } /// Specify the state cache size. pub fn state_cache_size(&self) -> usize { diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index 3c04d63144595..d75cdebc5a568 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,12 +18,10 @@ use crate::error::Result; use sc_service::config::KeystoreConfig; -use std::fs; -use std::path::PathBuf; +use std::{fs, path::{PathBuf, Path}}; use structopt::StructOpt; use crate::error; -use sp_core::crypto::{SecretString, Zeroize}; -use std::str::FromStr; +use sp_core::crypto::SecretString; /// default sub directory for the key store const DEFAULT_KEYSTORE_CONFIG_PATH: &'static str = "keystore"; @@ -31,6 +29,10 @@ const DEFAULT_KEYSTORE_CONFIG_PATH: &'static str = "keystore"; /// Parameters of the keystore #[derive(Debug, StructOpt)] pub struct KeystoreParams { + /// Specify custom URIs to connect to for keystore-services + #[structopt(long = "keystore-uri")] + pub keystore_uri: Option, + /// Specify custom keystore path. #[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] pub keystore_path: Option, @@ -62,31 +64,26 @@ pub struct KeystoreParams { /// Parse a sercret string, returning a displayable error. pub fn secret_string_from_str(s: &str) -> std::result::Result { - Ok(std::str::FromStr::from_str(s) - .map_err(|_e| "Could not get SecretString".to_string())?) 
+ std::str::FromStr::from_str(s).map_err(|_| "Could not get SecretString".to_string()) } impl KeystoreParams { /// Get the keystore configuration for the parameters - pub fn keystore_config(&self, base_path: &PathBuf) -> Result { + /// + /// Returns a vector of remote-urls and the local Keystore configuration + pub fn keystore_config(&self, config_dir: &Path) -> Result<(Option, KeystoreConfig)> { let password = if self.password_interactive { #[cfg(not(target_os = "unknown"))] { - let mut password = input_keystore_password()?; - let secret = std::str::FromStr::from_str(password.as_str()) - .map_err(|()| "Error reading password")?; - password.zeroize(); - Some(secret) + let password = input_keystore_password()?; + Some(SecretString::new(password)) } #[cfg(target_os = "unknown")] None } else if let Some(ref file) = self.password_filename { - let mut password = fs::read_to_string(file) + let password = fs::read_to_string(file) .map_err(|e| format!("{}", e))?; - let secret = std::str::FromStr::from_str(password.as_str()) - .map_err(|()| "Error reading password")?; - password.zeroize(); - Some(secret) + Some(SecretString::new(password)) } else { self.password.clone() }; @@ -94,9 +91,9 @@ impl KeystoreParams { let path = self .keystore_path .clone() - .unwrap_or_else(|| base_path.join(DEFAULT_KEYSTORE_CONFIG_PATH)); + .unwrap_or_else(|| config_dir.join(DEFAULT_KEYSTORE_CONFIG_PATH)); - Ok(KeystoreConfig::Path { path, password }) + Ok((self.keystore_uri.clone(), KeystoreConfig::Path { path, password })) } /// helper method to fetch password from `KeyParams` or read from stdin @@ -104,10 +101,8 @@ impl KeystoreParams { let (password_interactive, password) = (self.password_interactive, self.password.clone()); let pass = if password_interactive { - let mut password = rpassword::read_password_from_tty(Some("Key password: "))?; - let pass = Some(FromStr::from_str(&password).map_err(|()| "Error reading password")?); - password.zeroize(); - pass + let password = rpassword::read_password_from_tty(Some("Key password: "))?; + Some(SecretString::new(password)) } else { password }; diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index 93467bc8ec637..8308b123f71f3 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 209742f54e9b8..f4a6e8d3982ba 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
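`keystore_config` now takes the config directory as a `&Path` and falls back to a `keystore` sub-directory (`DEFAULT_KEYSTORE_CONFIG_PATH`) when no `--keystore-path` is given. A small sketch of just that defaulting, under the assumption that only the path logic is of interest here:

```rust
use std::path::{Path, PathBuf};

/// Sketch of the defaulting: an explicit `--keystore-path` wins, otherwise
/// `<config_dir>/keystore` is used.
fn keystore_path(explicit: Option<PathBuf>, config_dir: &Path) -> PathBuf {
    explicit.unwrap_or_else(|| config_dir.join("keystore"))
}

fn main() {
    let config_dir = Path::new("/tmp/node-config"); // hypothetical config directory
    assert_eq!(
        keystore_path(None, config_dir),
        PathBuf::from("/tmp/node-config/keystore")
    );
    assert_eq!(
        keystore_path(Some(PathBuf::from("/keys")), config_dir),
        PathBuf::from("/keys")
    );
}
```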
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,10 +18,10 @@ use crate::params::node_key_params::NodeKeyParams; use sc_network::{ - config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, TransportConfig}, + config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig}, multiaddr::Protocol, }; -use sc_service::{ChainSpec, config::{Multiaddr, MultiaddrWithPeerId}}; +use sc_service::{ChainSpec, ChainType, config::{Multiaddr, MultiaddrWithPeerId}}; use std::path::PathBuf; use structopt::StructOpt; @@ -36,10 +36,14 @@ pub struct NetworkParams { #[structopt(long = "reserved-nodes", value_name = "ADDR")] pub reserved_nodes: Vec, - /// Whether to only allow connections to/from reserved nodes. + /// Whether to only synchronize the chain with reserved nodes. /// - /// If you are a validator your node might still connect to other validator - /// nodes regardless of whether they are defined as reserved nodes. + /// Also disables automatic peer discovery. + /// + /// TCP connections might still be established with non-reserved nodes. + /// In particular, if you are a validator your node might still connect to other + /// validator nodes and collator nodes regardless of whether they are defined as + /// reserved nodes. #[structopt(long = "reserved-only")] pub reserved_only: bool, @@ -94,7 +98,8 @@ pub struct NetworkParams { /// Enable peer discovery on local networks. /// - /// By default this option is true for `--dev` and false otherwise. + /// By default this option is `true` for `--dev` or when the chain type is `Local`/`Development` + /// and false otherwise. #[structopt(long)] pub discover_local: bool, @@ -105,6 +110,10 @@ pub struct NetworkParams { /// security improvements. #[structopt(long)] pub kademlia_disjoint_query_paths: bool, + + /// Join the IPFS network and serve transactions over bitswap protocol. 
+ #[structopt(long)] + pub ipfs_server: bool, } impl NetworkParams { @@ -139,32 +148,44 @@ impl NetworkParams { let mut boot_nodes = chain_spec.boot_nodes().to_vec(); boot_nodes.extend(self.bootnodes.clone()); + let chain_type = chain_spec.chain_type(); + // Activate if the user explicitly requested local discovery, `--dev` is given or the + // chain type is `Local`/`Development` + let allow_non_globals_in_dht = self.discover_local + || is_dev + || matches!(chain_type, ChainType::Local | ChainType::Development); + NetworkConfiguration { boot_nodes, net_config_path, - reserved_nodes: self.reserved_nodes.clone(), - non_reserved_mode: if self.reserved_only { - NonReservedPeerMode::Deny - } else { - NonReservedPeerMode::Accept + default_peers_set: SetConfig { + in_peers: self.in_peers, + out_peers: self.out_peers, + reserved_nodes: self.reserved_nodes.clone(), + non_reserved_mode: if self.reserved_only { + NonReservedPeerMode::Deny + } else { + NonReservedPeerMode::Accept + }, }, listen_addresses, public_addresses, - notifications_protocols: Vec::new(), + extra_sets: Vec::new(), request_response_protocols: Vec::new(), node_key, node_name: node_name.to_string(), client_version: client_id.to_string(), - in_peers: self.in_peers, - out_peers: self.out_peers, transport: TransportConfig::Normal { enable_mdns: !is_dev && !self.no_mdns, allow_private_ipv4: !self.no_private_ipv4, wasm_external_transport: None, }, max_parallel_downloads: self.max_parallel_downloads, - allow_non_globals_in_dht: self.discover_local || is_dev, + enable_dht_random_walk: !self.reserved_only, + allow_non_globals_in_dht, kademlia_disjoint_query_paths: self.kademlia_disjoint_query_paths, + yamux_window_size: None, + ipfs_server: self.ipfs_server, } } } diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index 875411fbfb620..d43c87804dd3b 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/params/offchain_worker_params.rs b/client/cli/src/params/offchain_worker_params.rs index f8d48edc4729d..ef39a1ed41be2 100644 --- a/client/cli/src/params/offchain_worker_params.rs +++ b/client/cli/src/params/offchain_worker_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 7db808e6d8f2f..467ca253531f1 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,7 +17,7 @@ // along with this program. If not, see . 
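The `allow_non_globals_in_dht` value built above reduces to a small predicate over the `--discover-local` flag, `--dev`, and the chain type. A self-contained sketch (with a local `ChainType` stand-in rather than the real `sc_service` enum):

```rust
/// Local stand-in for sc_service::ChainType, for illustration only.
#[derive(Debug)]
enum ChainType {
    Development,
    Local,
    Live,
}

/// Mirrors the predicate assembled in `NetworkParams::network_config`.
fn allow_non_globals_in_dht(discover_local: bool, is_dev: bool, chain_type: &ChainType) -> bool {
    discover_local || is_dev || matches!(chain_type, ChainType::Local | ChainType::Development)
}

fn main() {
    assert!(allow_non_globals_in_dht(false, true, &ChainType::Live)); // --dev
    assert!(allow_non_globals_in_dht(false, false, &ChainType::Local)); // local chain type
    assert!(!allow_non_globals_in_dht(false, false, &ChainType::Live)); // live chain, no flag
}
```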
use crate::error; -use sc_service::{PruningMode, Role}; +use sc_service::{PruningMode, Role, KeepBlocks}; use structopt::StructOpt; /// Parameters to define the pruning mode @@ -30,11 +30,16 @@ pub struct PruningParams { /// 256 blocks. #[structopt(long = "pruning", value_name = "PRUNING_MODE")] pub pruning: Option, + /// Specify the number of finalized blocks to keep in the database. + /// + /// Default is to keep all blocks. + #[structopt(long, value_name = "COUNT")] + pub keep_blocks: Option, } impl PruningParams { /// Get the pruning value from the parameters - pub fn pruning(&self, unsafe_pruning: bool, role: &Role) -> error::Result { + pub fn state_pruning(&self, unsafe_pruning: bool, role: &Role) -> error::Result { // by default we disable pruning if the node is an authority (i.e. // `ArchiveAll`), otherwise we keep state for the last 256 blocks. if the // node is an authority and pruning is enabled explicitly, then we error @@ -58,4 +63,12 @@ impl PruningParams { } }) } + + /// Get the block pruning value from the parameters + pub fn keep_blocks(&self) -> error::Result { + Ok(match self.keep_blocks { + Some(n) => KeepBlocks::Some(n), + None => KeepBlocks::All, + }) + } } diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index ad9ab04070563..45ce41846bf12 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,11 +19,15 @@ use sc_service::config::BasePath; use std::path::PathBuf; use structopt::StructOpt; +use crate::arg_enums::TracingReceiver; /// Shared parameters used by all `CoreParams`. #[derive(Debug, StructOpt)] pub struct SharedParams { - /// Specify the chain specification (one of dev, local, or staging). + /// Specify the chain specification. + /// + /// It can be one of the predefined ones (dev, local, or staging) or it can be a path to a file with + /// the chainspec (such as one exported by the `build-spec` subcommand). #[structopt(long, value_name = "CHAIN_SPEC")] pub chain: Option, @@ -41,6 +45,32 @@ pub struct SharedParams { /// By default, all targets log `info`. The global log level can be set with -l. #[structopt(short = "l", long, value_name = "LOG_PATTERN")] pub log: Vec, + + /// Disable log color output. + #[structopt(long)] + pub disable_log_color: bool, + + /// Disable feature to dynamically update and reload the log filter. + /// + /// By default this feature is enabled, however it leads to a small performance decrease. + /// The `system_addLogFilter` and `system_resetLogFilter` RPCs will have no effect with this + /// option set. + #[structopt(long = "disable-log-reloading")] + pub disable_log_reloading: bool, + + /// Sets a custom profiling filter. Syntax is the same as for logging: = + #[structopt(long = "tracing-targets", value_name = "TARGETS")] + pub tracing_targets: Option, + + /// Receiver to process tracing messages. 
+ #[structopt( + long = "tracing-receiver", + value_name = "RECEIVER", + possible_values = &TracingReceiver::variants(), + case_insensitive = true, + default_value = "Log" + )] + pub tracing_receiver: TracingReceiver, } impl SharedParams { @@ -72,4 +102,24 @@ impl SharedParams { pub fn log_filters(&self) -> &[String] { &self.log } + + /// Should the log color output be disabled? + pub fn disable_log_color(&self) -> bool { + self.disable_log_color + } + + /// Is log reloading disabled + pub fn is_log_filter_reloading_disabled(&self) -> bool { + self.disable_log_reloading + } + + /// Receiver to process tracing messages. + pub fn tracing_receiver(&self) -> sc_service::TracingReceiver { + self.tracing_receiver.clone().into() + } + + /// Comma separated list of targets for tracing. + pub fn tracing_targets(&self) -> Option { + self.tracing_targets.clone() + } } diff --git a/client/cli/src/params/transaction_pool_params.rs b/client/cli/src/params/transaction_pool_params.rs index 3ad278426922e..bf0ed53e531c9 100644 --- a/client/cli/src/params/transaction_pool_params.rs +++ b/client/cli/src/params/transaction_pool_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index e6d35282ada2b..61a7fe9b01454 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
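Both `-l`/`--log` and the relocated `--tracing-targets` take directives of the form `target=level` (for example `sync=debug`), with a bare level applying globally. A minimal parsing sketch, for illustration only and not the actual sc-tracing parser:

```rust
/// Sketch: split one logging/tracing directive into its optional target and level.
fn parse_directive(s: &str) -> (Option<&str>, &str) {
    match s.split_once('=') {
        Some((target, level)) => (Some(target), level),
        None => (None, s),
    }
}

fn main() {
    assert_eq!(parse_directive("sync=debug"), (Some("sync"), "debug"));
    assert_eq!(parse_directive("test-target=info"), (Some("test-target"), "info"));
    assert_eq!(parse_directive("info"), (None, "info"));
}
```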
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,19 +25,22 @@ use futures::select; use futures::{future, future::FutureExt, Future}; use log::info; use sc_service::{Configuration, TaskType, TaskManager}; +use sc_telemetry::{TelemetryHandle, TelemetryWorker}; use sp_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; use std::marker::PhantomData; +use sc_service::Error as ServiceError; +use crate::error::Error as CliError; #[cfg(target_family = "unix")] -async fn main(func: F) -> std::result::Result<(), Box> +async fn main(func: F) -> std::result::Result<(), E> where F: Future> + future::FusedFuture, - E: 'static + std::error::Error, + E: std::error::Error + Send + Sync + 'static + From, { use tokio::signal::unix::{signal, SignalKind}; - let mut stream_int = signal(SignalKind::interrupt())?; - let mut stream_term = signal(SignalKind::terminate())?; + let mut stream_int = signal(SignalKind::interrupt()).map_err(ServiceError::Io)?; + let mut stream_term = signal(SignalKind::terminate()).map_err(ServiceError::Io)?; let t1 = stream_int.recv().fuse(); let t2 = stream_term.recv().fuse(); @@ -55,10 +58,10 @@ where } #[cfg(not(unix))] -async fn main(func: F) -> std::result::Result<(), Box> +async fn main(func: F) -> std::result::Result<(), E> where F: Future> + future::FusedFuture, - E: 'static + std::error::Error, + E: std::error::Error + Send + Sync + 'static + From, { use tokio::signal::ctrl_c; @@ -90,19 +93,19 @@ pub fn build_runtime() -> std::result::Result( +fn run_until_exit( mut tokio_runtime: tokio::runtime::Runtime, - future: FUT, + future: F, task_manager: TaskManager, -) -> Result<()> +) -> std::result::Result<(), E> where - FUT: Future> + future::Future, - ERR: 'static + std::error::Error, + F: Future> + future::Future, + E: std::error::Error + Send + Sync + 'static + From, { let f = future.fuse(); pin_mut!(f); - tokio_runtime.block_on(main(f)).map_err(|e| e.to_string())?; + tokio_runtime.block_on(main(f))?; tokio_runtime.block_on(task_manager.clean_shutdown()); Ok(()) @@ -112,12 +115,17 @@ where pub struct Runner { config: Configuration, tokio_runtime: tokio::runtime::Runtime, + telemetry_worker: TelemetryWorker, phantom: PhantomData, } impl Runner { /// Create a new runtime with the command provided in argument - pub fn new(cli: &C, command: &T) -> Result> { + pub fn new( + cli: &C, + command: &T, + telemetry_worker: TelemetryWorker, + ) -> Result> { let tokio_runtime = build_runtime()?; let runtime_handle = tokio_runtime.handle().clone(); @@ -130,9 +138,16 @@ impl Runner { } }; + let telemetry_handle = telemetry_worker.handle(); + Ok(Runner { - config: command.create_configuration(cli, task_executor.into())?, + config: command.create_configuration( + cli, + task_executor.into(), + Some(telemetry_handle), + )?, tokio_runtime, + telemetry_worker, phantom: PhantomData, }) } @@ -172,32 +187,44 @@ impl Runner { /// A helper function that runs a node with tokio and stops if the process receives the signal /// `SIGTERM` or `SIGINT`. 
- pub fn run_node_until_exit>>( + pub fn run_node_until_exit( mut self, initialize: impl FnOnce(Configuration) -> F, - ) -> Result<()> { + ) -> std::result::Result<(), E> + where + F: Future>, + E: std::error::Error + Send + Sync + 'static + From, + { self.print_node_infos(); let mut task_manager = self.tokio_runtime.block_on(initialize(self.config))?; + task_manager.spawn_handle().spawn("telemetry_worker", self.telemetry_worker.run()); let res = self.tokio_runtime.block_on(main(task_manager.future().fuse())); self.tokio_runtime.block_on(task_manager.clean_shutdown()); - res.map_err(|e| e.to_string().into()) + Ok(res?) } /// A helper function that runs a command with the configuration of this node. - pub fn sync_run(self, runner: impl FnOnce(Configuration) -> Result<()>) -> Result<()> { + pub fn sync_run( + self, + runner: impl FnOnce(Configuration) -> std::result::Result<(), E> + ) -> std::result::Result<(), E> + where + E: std::error::Error + Send + Sync + 'static + From, + { runner(self.config) } /// A helper function that runs a future with tokio and stops if the process receives /// the signal `SIGTERM` or `SIGINT`. - pub fn async_run( - self, runner: impl FnOnce(Configuration) -> Result<(FUT, TaskManager)>, - ) -> Result<()> + pub fn async_run( + self, runner: impl FnOnce(Configuration) -> std::result::Result<(F, TaskManager), E>, + ) -> std::result::Result<(), E> where - FUT: Future>, + F: Future>, + E: std::error::Error + Send + Sync + 'static + From + From, { let (future, task_manager) = runner(self.config)?; - run_until_exit(self.tokio_runtime, future, task_manager) + run_until_exit::<_, E>(self.tokio_runtime, future, task_manager) } /// Get an immutable reference to the node Configuration @@ -209,4 +236,11 @@ impl Runner { pub fn config_mut(&mut self) -> &mut Configuration { &mut self.config } + + /// Get a new [`TelemetryHandle`]. + /// + /// This is used when you want to register with the [`TelemetryWorker`]. 
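The reworked runner methods are generic over the caller's error type `E`, requiring only that it implements `std::error::Error` and can be built `From` the lower-level service/CLI errors, so failures bubble up via `?`/`.into()`. A compilable sketch of that pattern with stand-in error types (not the real `sc_service`/`sc_cli` ones):

```rust
use std::fmt;

/// Stand-in for the service-level error.
#[derive(Debug)]
struct ServiceError(String);

impl fmt::Display for ServiceError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "service error: {}", self.0)
    }
}
impl std::error::Error for ServiceError {}

/// Stand-in for the caller's error type; the only requirement the helper
/// places on it is the `From<ServiceError>` conversion.
#[derive(Debug)]
enum CliError {
    Service(ServiceError),
}

impl fmt::Display for CliError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            CliError::Service(e) => write!(f, "{}", e),
        }
    }
}
impl std::error::Error for CliError {}
impl From<ServiceError> for CliError {
    fn from(e: ServiceError) -> Self {
        CliError::Service(e)
    }
}

/// Generic helper in the spirit of the new signatures: low-level failures are
/// converted into the caller's error type via `.into()`.
fn run_step<E>(fail: bool) -> Result<(), E>
where
    E: std::error::Error + Send + Sync + 'static + From<ServiceError>,
{
    if fail {
        return Err(ServiceError("signal handler could not be installed".into()).into());
    }
    Ok(())
}

fn main() {
    let ok: Result<(), CliError> = run_step(false);
    assert!(ok.is_ok());
    let err: Result<(), CliError> = run_step(true);
    assert!(err.is_err());
}
```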
+ pub fn telemetry_handle(&self) -> TelemetryHandle { + self.telemetry_worker.handle() + } } diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index ccc4d515a8e11..1465119c81d08 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-aura" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Aura consensus algorithm for substrate" edition = "2018" @@ -13,38 +13,42 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0", path = "../../../primitives/consensus/aura" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } -sc-client-api = { version = "2.0.0", path = "../../api" } -codec = { package = "parity-scale-codec", version = "1.3.4" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sp-application-crypto = { version = "3.0.0", path = "../../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.9.0", path = "../../../primitives/consensus/aura" } +sp-block-builder = { version = "3.0.0", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.9.0", path = "../../block-builder" } +sc-client-api = { version = "3.0.0", path = "../../api" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sp-consensus-slots = { version = "0.9.0", path = "../../../primitives/consensus/slots" } derive_more = "0.99.2" -futures = "0.3.4" +futures = "0.3.9" futures-timer = "3.0.1" -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } log = "0.4.8" -parking_lot = "0.10.0" -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-version = { version = "2.0.0", path = "../../../primitives/version" } -sc-consensus-slots = { version = "0.8.0", path = "../slots" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } -sc-telemetry = { version = "2.0.0", path = "../../telemetry" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} +parking_lot = "0.11.1" +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-io = { version = "3.0.0", path = "../../../primitives/io" } +sp-version = { version = "3.0.0", path = "../../../primitives/version" } +sc-consensus-slots = { version = "0.9.0", path = "../slots" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } +sp-keystore = { version = "0.9.0", path = 
"../../../primitives/keystore" } +sc-telemetry = { version = "3.0.0", path = "../../telemetry" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} +# We enable it only for web-wasm check +# See https://docs.rs/getrandom/0.2.1/getrandom/#webassembly-support +getrandom = { version = "0.2", features = ["js"], optional = true } [dev-dependencies] -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } -sc-executor = { version = "0.8.0", path = "../../executor" } -sc-keystore = { version = "2.0.0", path = "../../keystore" } -sc-network = { version = "0.8.0", path = "../../network" } +sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } +sp-tracing = { version = "3.0.0", path = "../../../primitives/tracing" } +sc-executor = { version = "0.9.0", path = "../../executor" } +sc-keystore = { version = "3.0.0", path = "../../keystore" } +sc-network = { version = "0.9.0", path = "../../network" } sc-network-test = { version = "0.8.0", path = "../../network/test" } -sc-service = { version = "0.8.0", default-features = false, path = "../../service" } +sc-service = { version = "0.9.0", default-features = false, path = "../../service" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } tempfile = "3.1.0" diff --git a/client/consensus/aura/src/digests.rs b/client/consensus/aura/src/digests.rs index 3332e4c6a6dff..bbf31136b0fcd 100644 --- a/client/consensus/aura/src/digests.rs +++ b/client/consensus/aura/src/digests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -24,6 +24,7 @@ use sp_core::Pair; use sp_consensus_aura::AURA_ENGINE_ID; use sp_runtime::generic::{DigestItem, OpaqueDigestItemId}; +use sp_consensus_slots::Slot; use codec::{Encode, Codec}; use std::fmt::Debug; @@ -38,10 +39,10 @@ pub trait CompatibleDigestItem: Sized { fn as_aura_seal(&self) -> Option>; /// Construct a digest item which contains the slot number - fn aura_pre_digest(slot_num: u64) -> Self; + fn aura_pre_digest(slot: Slot) -> Self; /// If this item is an AuRa pre-digest, return the slot number - fn as_aura_pre_digest(&self) -> Option; + fn as_aura_pre_digest(&self) -> Option; } impl CompatibleDigestItem
<P> for DigestItem<Hash> where @@ -57,11 +58,11 @@ impl<P, Hash> CompatibleDigestItem<P>
for DigestItem where self.try_to(OpaqueDigestItemId::Seal(&AURA_ENGINE_ID)) } - fn aura_pre_digest(slot_num: u64) -> Self { - DigestItem::PreRuntime(AURA_ENGINE_ID, slot_num.encode()) + fn aura_pre_digest(slot: Slot) -> Self { + DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode()) } - fn as_aura_pre_digest(&self) -> Option { + fn as_aura_pre_digest(&self) -> Option { self.try_to(OpaqueDigestItemId::PreRuntime(&AURA_ENGINE_ID)) } } diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 5013c1813b685..5b9e7c590bde5 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -43,11 +43,11 @@ use prometheus_endpoint::Registry; use codec::{Encode, Decode, Codec}; use sp_consensus::{ - self, BlockImport, Environment, Proposer, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, - BlockOrigin, Error as ConsensusError, SelectChain, SlotData, BlockCheckParams, ImportResult -}; -use sp_consensus::import_queue::{ - Verifier, BasicQueue, DefaultImportQueue, BoxJustificationImport, BoxFinalityProofImport, + BlockImport, Environment, Proposer, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, + BlockOrigin, Error as ConsensusError, SelectChain, SlotData, BlockCheckParams, ImportResult, + import_queue::{ + Verifier, BasicQueue, DefaultImportQueue, BoxJustificationImport, + }, }; use sc_client_api::{backend::AuxStore, BlockOf}; use sp_blockchain::{ @@ -57,10 +57,7 @@ use sp_blockchain::{ use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_core::crypto::Public; use sp_application_crypto::{AppKey, AppPublic}; -use sp_runtime::{ - generic::{BlockId, OpaqueDigestItemId}, - Justification, -}; +use sp_runtime::{generic::{BlockId, OpaqueDigestItemId}, traits::NumberFor, Justification}; use sp_runtime::traits::{Block as BlockT, Header, DigestItemFor, Zero, Member}; use sp_api::ProvideRuntimeApi; use sp_core::crypto::Pair; @@ -73,7 +70,9 @@ use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sc_consensus_slots::{ CheckedHeader, SlotInfo, SlotCompatible, StorageChanges, check_equivocation, + BackoffAuthoringBlocksStrategy, }; +use sp_consensus_slots::Slot; use sp_api::ApiExt; @@ -105,10 +104,10 @@ pub fn slot_duration(client: &C) -> CResult where } /// Get slot author for given block along with authorities. -fn slot_author(slot_num: u64, authorities: &[AuthorityId
<P>]) -> Option<&AuthorityId<P>> {
+fn slot_author<P: Pair>(slot: Slot, authorities: &[AuthorityId<P>]) -> Option<&AuthorityId<P>
> { if authorities.is_empty() { return None } - let idx = slot_num % (authorities.len() as u64); + let idx = *slot % (authorities.len() as u64); assert!( idx <= usize::max_value() as u64, "It is impossible to have a vector with length beyond the address space; qed", @@ -138,7 +137,7 @@ impl SlotCompatible for AuraSlotCompatible { } /// Start the aura worker. The returned future should be run in a futures executor. -pub fn start_aura( +pub fn start_aura( slot_duration: SlotDuration, client: Arc, select_chain: SC, @@ -147,11 +146,12 @@ pub fn start_aura( sync_oracle: SO, inherent_data_providers: InherentDataProviders, force_authoring: bool, + backoff_authoring_blocks: Option, keystore: SyncCryptoStorePtr, can_author_with: CAW, ) -> Result, sp_consensus::Error> where B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + Send + Sync, + C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + HeaderBackend + Send + Sync, C::Api: AuraApi>, SC: SelectChain, E: Environment + Send + Sync + 'static, @@ -163,6 +163,7 @@ pub fn start_aura( Error: std::error::Error + Send + From + 'static, SO: SyncOracle + Send + Sync + Clone, CAW: CanAuthorWith + Send, + BS: BackoffAuthoringBlocksStrategy> + Send + 'static, { let worker = AuraWorker { client, @@ -171,6 +172,7 @@ pub fn start_aura( keystore, sync_oracle: sync_oracle.clone(), force_authoring, + backoff_authoring_blocks, _key_type: PhantomData::
<P>
, }; register_aura_inherent_data_provider( @@ -188,20 +190,22 @@ pub fn start_aura( )) } -struct AuraWorker { +struct AuraWorker { client: Arc, block_import: Arc>, env: E, keystore: SyncCryptoStorePtr, sync_oracle: SO, force_authoring: bool, + backoff_authoring_blocks: Option, _key_type: PhantomData
<P>
, } -impl sc_consensus_slots::SimpleSlotWorker for AuraWorker +impl sc_consensus_slots::SimpleSlotWorker + for AuraWorker where B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + Sync, + C: ProvideRuntimeApi + BlockOf + ProvideCache + HeaderBackend + Sync, C::Api: AuraApi>, E: Environment, E::Proposer: Proposer>, @@ -210,6 +214,7 @@ where P::Public: AppPublic + Public + Member + Encode + Decode + Hash, P::Signature: TryFrom> + Member + Encode + Decode + Hash + Debug, SO: SyncOracle + Send + Clone, + BS: BackoffAuthoringBlocksStrategy> + Send + 'static, Error: std::error::Error + Send + From + 'static, { type BlockImport = I; @@ -232,7 +237,7 @@ where fn epoch_data( &self, header: &B::Header, - _slot_number: u64, + _slot: Slot, ) -> Result { authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) } @@ -244,12 +249,12 @@ where fn claim_slot( &self, _header: &B::Header, - slot_number: u64, + slot: Slot, epoch_data: &Self::EpochData, ) -> Option { - let expected_author = slot_author::
<P>
(slot_number, epoch_data); + let expected_author = slot_author::
<P>
(slot, epoch_data); expected_author.and_then(|p| { - if SyncCryptoStore::has_keys( + if SyncCryptoStore::has_keys( &*self.keystore, &[(p.to_raw_vec(), sp_application_crypto::key_types::AURA)], ) { @@ -262,11 +267,11 @@ where fn pre_digest_data( &self, - slot_number: u64, + slot: Slot, _claim: &Self::Claim, ) -> Vec> { vec![ - as CompatibleDigestItem
<P>
>::aura_pre_digest(slot_number), + as CompatibleDigestItem
<P>
>::aura_pre_digest(slot), ] } @@ -316,6 +321,21 @@ where self.force_authoring } + fn should_backoff(&self, slot: Slot, chain_head: &B::Header) -> bool { + if let Some(ref strategy) = self.backoff_authoring_blocks { + if let Ok(chain_head_slot) = find_pre_digest::(chain_head) { + return strategy.should_backoff( + *chain_head.number(), + chain_head_slot, + self.client.info().finalized_number, + slot, + self.logging_target(), + ); + } + } + false + } + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { &mut self.sync_oracle } @@ -341,9 +361,10 @@ where if let Some(slot_lenience) = sc_consensus_slots::slot_lenience_exponential(parent_slot, slot_info) { - debug!(target: "aura", + debug!( + target: "aura", "No block for {} slots. Applying linear lenience of {}s", - slot_info.number.saturating_sub(parent_slot + 1), + slot_info.slot.saturating_sub(parent_slot + 1), slot_lenience.as_secs(), ); @@ -379,7 +400,7 @@ enum Error { DataProvider(String), Runtime(String), #[display(fmt = "Slot number must increase: parent slot: {}, this slot: {}", _0, _1)] - SlotNumberMustIncrease(u64, u64), + SlotMustIncrease(Slot, Slot), #[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)] ParentUnavailable(B::Hash, B::Hash), } @@ -390,16 +411,16 @@ impl std::convert::From> for String { } } -fn find_pre_digest(header: &B::Header) -> Result> +fn find_pre_digest(header: &B::Header) -> Result> where DigestItemFor: CompatibleDigestItem
<P>
, P::Signature: Decode, P::Public: Encode + Decode + PartialEq + Clone, { if header.number().is_zero() { - return Ok(0); + return Ok(0.into()); } - let mut pre_digest: Option = None; + let mut pre_digest: Option = None; for log in header.digest().logs() { trace!(target: "aura", "Checking log {:?}", log); match (log.as_aura_pre_digest(), pre_digest.is_some()) { @@ -418,11 +439,11 @@ fn find_pre_digest(header: &B::Header) -> Result( client: &C, - slot_now: u64, + slot_now: Slot, mut header: B::Header, hash: B::Hash, authorities: &[AuthorityId
<P>
], -) -> Result)>, Error> where +) -> Result)>, Error> where DigestItemFor: CompatibleDigestItem
<P>
, P::Signature: Decode, C: sc_client_api::backend::AuxStore, @@ -437,15 +458,15 @@ fn check_header( aura_err(Error::HeaderBadSeal(hash)) })?; - let slot_num = find_pre_digest::(&header)?; + let slot = find_pre_digest::(&header)?; - if slot_num > slot_now { + if slot > slot_now { header.digest_mut().push(seal); - Ok(CheckedHeader::Deferred(header, slot_num)) + Ok(CheckedHeader::Deferred(header, slot)) } else { // check the signature is valid under the expected authority and // chain state. - let expected_author = match slot_author::
<P>
(slot_num, &authorities) { + let expected_author = match slot_author::
<P>
(slot, &authorities) { None => return Err(Error::SlotAuthorNotFound), Some(author) => author, }; @@ -456,19 +477,19 @@ fn check_header( if let Some(equivocation_proof) = check_equivocation( client, slot_now, - slot_num, + slot, &header, expected_author, ).map_err(Error::Client)? { info!( "Slot author is equivocating at slot {} with headers {:?} and {:?}", - slot_num, + slot, equivocation_proof.first_header.hash(), equivocation_proof.second_header.hash(), ); } - Ok(CheckedHeader::Checked(header, (slot_num, seal))) + Ok(CheckedHeader::Checked(header, (slot, seal))) } else { Err(Error::BadSignature(hash)) } @@ -592,12 +613,12 @@ impl Verifier for AuraVerifier where &authorities[..], ).map_err(|e| e.to_string())?; match checked_header { - CheckedHeader::Checked(pre_header, (slot_num, seal)) => { + CheckedHeader::Checked(pre_header, (slot, seal)) => { // if the body is passed through, we need to use the runtime // to check that the internally-set timestamp in the inherents // actually matches the slot set in the seal. if let Some(inner_body) = body.take() { - inherent_data.aura_replace_inherent_data(slot_num); + inherent_data.aura_replace_inherent_data(slot); let block = B::new(pre_header.clone(), inner_body); // skip the inherents verification if the runtime API is old. @@ -781,7 +802,7 @@ impl BlockImport for AuraBlockImport>, ) -> Result { let hash = block.post_hash(); - let slot_number = find_pre_digest::(&block.header) + let slot = find_pre_digest::(&block.header) .expect("valid Aura headers must contain a predigest; \ header has been already verified; qed"); @@ -797,10 +818,10 @@ impl BlockImport for AuraBlockImport::SlotNumberMustIncrease(parent_slot, slot_number) + Error::::SlotMustIncrease(parent_slot, slot) ).into()) ); } @@ -814,7 +835,6 @@ pub fn import_queue( slot_duration: SlotDuration, block_import: I, justification_import: Option>, - finality_proof_import: Option>, client: Arc, inherent_data_providers: InherentDataProviders, spawner: &S, @@ -846,7 +866,6 @@ pub fn import_queue( verifier, Box::new(block_import), justification_import, - finality_proof_import, spawner, registry, )) @@ -863,7 +882,7 @@ mod tests { use sp_keyring::sr25519::Keyring; use sc_client_api::BlockchainEvents; use sp_consensus_aura::sr25519::AuthorityPair; - use sc_consensus_slots::SimpleSlotWorker; + use sc_consensus_slots::{SimpleSlotWorker, BackoffAuthoringOnFinalizedHeadLagging}; use std::task::Poll; use sc_block_builder::BlockBuilderProvider; use sp_runtime::traits::Header as _; @@ -1012,7 +1031,7 @@ mod tests { &inherent_data_providers, slot_duration.get() ).expect("Registers aura inherent data provider"); - aura_futures.push(start_aura::<_, _, _, _, _, AuthorityPair, _, _, _>( + aura_futures.push(start_aura::<_, _, _, _, _, AuthorityPair, _, _, _, _>( slot_duration, client.clone(), select_chain, @@ -1021,6 +1040,7 @@ mod tests { DummyOracle, inherent_data_providers, false, + Some(BackoffAuthoringOnFinalizedHeadLagging::default()), keystore, sp_consensus::AlwaysCanAuthor, ).expect("Starts aura")); @@ -1081,6 +1101,7 @@ mod tests { keystore: keystore.into(), sync_oracle: DummyOracle.clone(), force_authoring: false, + backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), _key_type: PhantomData::, }; @@ -1091,13 +1112,13 @@ mod tests { Default::default(), Default::default() ); - assert!(worker.claim_slot(&head, 0, &authorities).is_none()); - assert!(worker.claim_slot(&head, 1, &authorities).is_none()); - assert!(worker.claim_slot(&head, 2, &authorities).is_none()); - 
assert!(worker.claim_slot(&head, 3, &authorities).is_some()); - assert!(worker.claim_slot(&head, 4, &authorities).is_none()); - assert!(worker.claim_slot(&head, 5, &authorities).is_none()); - assert!(worker.claim_slot(&head, 6, &authorities).is_none()); - assert!(worker.claim_slot(&head, 7, &authorities).is_some()); + assert!(worker.claim_slot(&head, 0.into(), &authorities).is_none()); + assert!(worker.claim_slot(&head, 1.into(), &authorities).is_none()); + assert!(worker.claim_slot(&head, 2.into(), &authorities).is_none()); + assert!(worker.claim_slot(&head, 3.into(), &authorities).is_some()); + assert!(worker.claim_slot(&head, 4.into(), &authorities).is_none()); + assert!(worker.claim_slot(&head, 5.into(), &authorities).is_none()); + assert!(worker.claim_slot(&head, 6.into(), &authorities).is_none()); + assert!(worker.claim_slot(&head, 7.into(), &authorities).is_some()); } } diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 2178f1cf97010..14d48fba1bb57 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-babe" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "BABE consensus algorithm for substrate" edition = "2018" @@ -14,54 +14,55 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +sp-consensus-babe = { version = "0.9.0", path = "../../../primitives/consensus/babe" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-application-crypto = { version = "3.0.0", path = "../../../primitives/application-crypto" } +sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } num-bigint = "0.2.3" num-rational = "0.2.2" num-traits = "0.2.8" serde = { version = "1.0.104", features = ["derive"] } -sp-version = { version = "2.0.0", path = "../../../primitives/version" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0", path = "../../telemetry" } -sc-keystore = { version = "2.0.0", path = "../../keystore" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sc-consensus-epochs = { version = "0.8.0", path = "../epochs" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-consensus-vrf = { version = "0.8.0", path = "../../../primitives/consensus/vrf" } -sc-consensus-uncles = { version = "0.8.0", path = "../uncles" } -sc-consensus-slots = { version = "0.8.0", path = "../slots" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../../primitives/utils" } 
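The `claim_slot` assertions in the Aura test above follow from the round-robin rule `idx = *slot % (authorities.len() as u64)` in `slot_author`. A small sketch of that arithmetic, with the authority count and local key index assumed to mirror the test setup:

```rust
/// The Aura round-robin rule: the author of a slot is the authority at
/// index `slot % authorities.len()`.
fn slot_author_index(slot: u64, num_authorities: u64) -> Option<u64> {
    if num_authorities == 0 {
        return None;
    }
    Some(slot % num_authorities)
}

fn main() {
    // Assumed setup mirroring the test: four authorities, with the key held in
    // the local keystore sitting at index 3.
    let our_index = 3;
    let claimable: Vec<u64> = (0..8u64)
        .filter(|slot| slot_author_index(*slot, 4) == Some(our_index))
        .collect();
    assert_eq!(claimable, vec![3, 7]); // only slots 3 and 7 are claimable in 0..8
}
```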
-fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} -futures = "0.3.4" +sp-version = { version = "3.0.0", path = "../../../primitives/version" } +sp-io = { version = "3.0.0", path = "../../../primitives/io" } +sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } +sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "3.0.0", path = "../../telemetry" } +sc-keystore = { version = "3.0.0", path = "../../keystore" } +sc-client-api = { version = "3.0.0", path = "../../api" } +sc-consensus-epochs = { version = "0.9.0", path = "../epochs" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sp-block-builder = { version = "3.0.0", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sp-consensus-slots = { version = "0.9.0", path = "../../../primitives/consensus/slots" } +sp-consensus-vrf = { version = "0.9.0", path = "../../../primitives/consensus/vrf" } +sc-consensus-uncles = { version = "0.9.0", path = "../uncles" } +sc-consensus-slots = { version = "0.9.0", path = "../slots" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-utils = { version = "3.0.0", path = "../../../primitives/utils" } +fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} +futures = "0.3.9" futures-timer = "3.0.1" -parking_lot = "0.10.0" +parking_lot = "0.11.1" log = "0.4.8" schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } rand = "0.7.2" merlin = "2.0" pdqselect = "0.1.0" derive_more = "0.99.2" -retain_mut = "0.1.1" +retain_mut = "0.1.2" [dev-dependencies] -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } -sc-executor = { version = "0.8.0", path = "../../executor" } -sc-network = { version = "0.8.0", path = "../../network" } +sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } +sp-tracing = { version = "3.0.0", path = "../../../primitives/tracing" } +sc-executor = { version = "0.9.0", path = "../../executor" } +sc-network = { version = "0.9.0", path = "../../network" } sc-network-test = { version = "0.8.0", path = "../../network/test" } -sc-service = { version = "0.8.0", default-features = false, path = "../../service" } +sc-service = { version = "0.9.0", default-features = false, path = "../../service" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8.0", path = "../../block-builder" } +sc-block-builder = { version = "0.9.0", path = "../../block-builder" } rand_chacha = "0.2.2" tempfile = "3.1.0" diff --git a/client/consensus/babe/README.md b/client/consensus/babe/README.md index faba3948ed715..a404d2ea44706 100644 --- a/client/consensus/babe/README.md +++ b/client/consensus/babe/README.md @@ -43,6 +43,6 @@ primary blocks in the chain. We will pick the heaviest chain (more primary blocks) and will go with the longest one in case of a tie. 
An in-depth description and analysis of the protocol can be found here: - + License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 5b3169e600a98..71a1205e3c7aa 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-babe-rpc" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "RPC extensions for the BABE consensus algorithm" edition = "2018" @@ -13,28 +13,28 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-consensus-babe = { version = "0.8.0", path = "../" } -sc-rpc-api = { version = "0.8.0", path = "../../../rpc-api" } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" -sp-consensus-babe = { version = "0.8.0", path = "../../../../primitives/consensus/babe" } +sc-consensus-babe = { version = "0.9.0", path = "../" } +sc-rpc-api = { version = "0.9.0", path = "../../../rpc-api" } +jsonrpc-core = "15.1.0" +jsonrpc-core-client = "15.1.0" +jsonrpc-derive = "15.1.0" +sp-consensus-babe = { version = "0.9.0", path = "../../../../primitives/consensus/babe" } serde = { version = "1.0.104", features=["derive"] } -sp-blockchain = { version = "2.0.0", path = "../../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../../primitives/runtime" } -sc-consensus-epochs = { version = "0.8.0", path = "../../epochs" } +sp-blockchain = { version = "3.0.0", path = "../../../../primitives/blockchain" } +sp-runtime = { version = "3.0.0", path = "../../../../primitives/runtime" } +sc-consensus-epochs = { version = "0.9.0", path = "../../epochs" } futures = { version = "0.3.4", features = ["compat"] } derive_more = "0.99.2" -sp-api = { version = "2.0.0", path = "../../../../primitives/api" } -sp-consensus = { version = "0.8.0", path = "../../../../primitives/consensus/common" } -sp-core = { version = "2.0.0", path = "../../../../primitives/core" } -sp-application-crypto = { version = "2.0.0", path = "../../../../primitives/application-crypto" } -sp-keystore = { version = "0.8.0", path = "../../../../primitives/keystore" } +sp-api = { version = "3.0.0", path = "../../../../primitives/api" } +sp-consensus = { version = "0.9.0", path = "../../../../primitives/consensus/common" } +sp-core = { version = "3.0.0", path = "../../../../primitives/core" } +sp-application-crypto = { version = "3.0.0", path = "../../../../primitives/application-crypto" } +sp-keystore = { version = "0.9.0", path = "../../../../primitives/keystore" } [dev-dependencies] -sc-consensus = { version = "0.8.0", path = "../../../consensus/common" } +sc-consensus = { version = "0.9.0", path = "../../../consensus/common" } serde_json = "1.0.50" -sp-keyring = { version = "2.0.0", path = "../../../../primitives/keyring" } -sc-keystore = { version = "2.0.0", path = "../../../keystore" } +sp-keyring = { version = "3.0.0", path = "../../../../primitives/keyring" } +sc-keystore = { version = "3.0.0", path = "../../../keystore" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } tempfile = "3.1.0" diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index a90964cdf73f7..ca14a764eece5 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -124,7 +124,13 @@ impl BabeApi for BabeRpcHandler .map_err(|err| { Error::StringError(format!("{:?}", err)) })?; - let epoch = epoch_data(&shared_epoch, &client, &babe_config, epoch_start, &select_chain)?; + let epoch = epoch_data( + &shared_epoch, + &client, + &babe_config, + *epoch_start, + &select_chain, + )?; let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); let mut claims: HashMap = HashMap::new(); @@ -142,19 +148,19 @@ impl BabeApi for BabeRpcHandler .collect::>() }; - for slot_number in epoch_start..epoch_end { + for slot in *epoch_start..*epoch_end { if let Some((claim, key)) = - authorship::claim_slot_using_keys(slot_number, &epoch, &keystore, &keys) + authorship::claim_slot_using_keys(slot.into(), &epoch, &keystore, &keys) { match claim { PreDigest::Primary { .. } => { - claims.entry(key).or_default().primary.push(slot_number); + claims.entry(key).or_default().primary.push(slot); } PreDigest::SecondaryPlain { .. } => { - claims.entry(key).or_default().secondary.push(slot_number); + claims.entry(key).or_default().secondary.push(slot); } PreDigest::SecondaryVRF { .. } => { - claims.entry(key).or_default().secondary_vrf.push(slot_number); + claims.entry(key).or_default().secondary_vrf.push(slot.into()); }, }; } @@ -167,7 +173,7 @@ impl BabeApi for BabeRpcHandler } } -/// Holds information about the `slot_number`'s that can be claimed by a given key. +/// Holds information about the `slot`'s that can be claimed by a given key. #[derive(Default, Debug, Deserialize, Serialize)] pub struct EpochAuthorship { /// the array of primary slots that can be claimed @@ -197,12 +203,12 @@ impl From for jsonrpc_core::Error { } } -/// fetches the epoch data for a given slot_number. +/// fetches the epoch data for a given slot. fn epoch_data( epoch_changes: &SharedEpochChanges, client: &Arc, babe_config: &Config, - slot_number: u64, + slot: u64, select_chain: &SC, ) -> Result where @@ -215,7 +221,7 @@ fn epoch_data( descendent_query(&**client), &parent.hash(), parent.number().clone(), - slot_number, + slot.into(), |slot| Epoch::genesis(&babe_config, slot), ) .map_err(|e| Error::Consensus(ConsensusError::ChainLookup(format!("{:?}", e))))? diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 28a3692958e18..1120f660613a6 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -1,28 +1,27 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! BABE authority selection and slot claiming. use sp_application_crypto::AppKey; use sp_consensus_babe::{ - BABE_VRF_PREFIX, - AuthorityId, BabeAuthorityWeight, - SlotNumber, - make_transcript, - make_transcript_data, + BABE_VRF_PREFIX, AuthorityId, BabeAuthorityWeight, make_transcript, make_transcript_data, + Slot, }; use sp_consensus_babe::digests::{ PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest, @@ -104,7 +103,7 @@ pub(super) fn check_primary_threshold(inout: &VRFInOut, threshold: u128) -> bool /// authorities. This should always assign the slot to some authority unless the /// authorities list is empty. pub(super) fn secondary_slot_author( - slot_number: u64, + slot: Slot, authorities: &[(AuthorityId, BabeAuthorityWeight)], randomness: [u8; 32], ) -> Option<&AuthorityId> { @@ -112,7 +111,7 @@ pub(super) fn secondary_slot_author( return None; } - let rand = U256::from((randomness, slot_number).using_encoded(blake2_256)); + let rand = U256::from((randomness, slot).using_encoded(blake2_256)); let authorities_len = U256::from(authorities.len()); let idx = rand % authorities_len; @@ -128,7 +127,7 @@ pub(super) fn secondary_slot_author( /// pre-digest to use when authoring the block, or `None` if it is not our turn /// to propose. fn claim_secondary_slot( - slot_number: SlotNumber, + slot: Slot, epoch: &Epoch, keys: &[(AuthorityId, usize)], keystore: &SyncCryptoStorePtr, @@ -141,7 +140,7 @@ fn claim_secondary_slot( } let expected_author = super::authorship::secondary_slot_author( - slot_number, + slot, authorities, *randomness, )?; @@ -151,7 +150,7 @@ fn claim_secondary_slot( let pre_digest = if author_secondary_vrf { let transcript_data = super::authorship::make_transcript_data( randomness, - slot_number, + slot, *epoch_index, ); let result = SyncCryptoStore::sr25519_vrf_sign( @@ -162,7 +161,7 @@ fn claim_secondary_slot( ); if let Ok(signature) = result { Some(PreDigest::SecondaryVRF(SecondaryVRFPreDigest { - slot_number, + slot, vrf_output: VRFOutput(signature.output), vrf_proof: VRFProof(signature.proof), authority_index: *authority_index as u32, @@ -172,7 +171,7 @@ fn claim_secondary_slot( } } else if SyncCryptoStore::has_keys(&**keystore, &[(authority_id.to_raw_vec(), AuthorityId::ID)]) { Some(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { - slot_number, + slot, authority_index: *authority_index as u32, })) } else { @@ -193,7 +192,7 @@ fn claim_secondary_slot( /// secondary slots enabled for the given epoch, we will fallback to trying to /// claim a secondary slot. pub fn claim_slot( - slot_number: SlotNumber, + slot: Slot, epoch: &Epoch, keystore: &SyncCryptoStorePtr, ) -> Option<(PreDigest, AuthorityId)> { @@ -201,24 +200,24 @@ pub fn claim_slot( .enumerate() .map(|(index, a)| (a.0.clone(), index)) .collect::>(); - claim_slot_using_keys(slot_number, epoch, keystore, &authorities) + claim_slot_using_keys(slot, epoch, keystore, &authorities) } /// Like `claim_slot`, but allows passing an explicit set of key pairs. Useful if we intend /// to make repeated calls for different slots using the same key pairs. 
pub fn claim_slot_using_keys( - slot_number: SlotNumber, + slot: Slot, epoch: &Epoch, keystore: &SyncCryptoStorePtr, keys: &[(AuthorityId, usize)], ) -> Option<(PreDigest, AuthorityId)> { - claim_primary_slot(slot_number, epoch, epoch.config.c, keystore, &keys) + claim_primary_slot(slot, epoch, epoch.config.c, keystore, &keys) .or_else(|| { if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() || epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() { claim_secondary_slot( - slot_number, + slot, &epoch, keys, &keystore, @@ -235,7 +234,7 @@ pub fn claim_slot_using_keys( /// the VRF. If the VRF produces a value less than `threshold`, it is our turn, /// so it returns `Some(_)`. Otherwise, it returns `None`. fn claim_primary_slot( - slot_number: SlotNumber, + slot: Slot, epoch: &Epoch, c: (u64, u64), keystore: &SyncCryptoStorePtr, @@ -246,12 +245,12 @@ fn claim_primary_slot( for (authority_id, authority_index) in keys { let transcript = super::authorship::make_transcript( randomness, - slot_number, + slot, *epoch_index ); let transcript_data = super::authorship::make_transcript_data( randomness, - slot_number, + slot, *epoch_index ); // Compute the threshold we will use. @@ -274,7 +273,7 @@ fn claim_primary_slot( }; if super::authorship::check_primary_threshold(&inout, threshold) { let pre_digest = PreDigest::Primary(PrimaryPreDigest { - slot_number, + slot, vrf_output: VRFOutput(signature.output), vrf_proof: VRFProof(signature.proof), authority_index: *authority_index as u32, @@ -312,7 +311,7 @@ mod tests { let mut epoch = Epoch { epoch_index: 10, - start_slot: 0, + start_slot: 0.into(), duration: 20, authorities: authorities.clone(), randomness: Default::default(), @@ -322,9 +321,9 @@ mod tests { }, }; - assert!(claim_slot(10, &epoch, &keystore).is_none()); + assert!(claim_slot(10.into(), &epoch, &keystore).is_none()); epoch.authorities.push((valid_public_key.clone().into(), 10)); - assert_eq!(claim_slot(10, &epoch, &keystore).unwrap().1, valid_public_key.into()); + assert_eq!(claim_slot(10.into(), &epoch, &keystore).unwrap().1, valid_public_key.into()); } } diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 287121566a417..7d5df77c92176 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Schema for BABE epoch changes in the aux-db. 
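`claim_slot_using_keys` above attempts a primary (VRF-based) claim first and only falls back to a secondary claim when the epoch configuration allows secondary slots. A simplified sketch of that fallback order, using illustrative stand-in types rather than the real BABE pre-digests:

```rust
/// Illustrative stand-in for the BABE pre-digest variants.
#[derive(Debug, PartialEq)]
enum PreDigest {
    Primary,
    SecondaryPlain,
}

/// Simplified shape of the fallback: a primary claim always wins; a secondary
/// claim is only considered when the epoch config permits secondary slots.
fn claim_slot(
    primary: Option<PreDigest>,
    secondary: Option<PreDigest>,
    secondary_slots_allowed: bool,
) -> Option<PreDigest> {
    primary.or_else(|| if secondary_slots_allowed { secondary } else { None })
}

fn main() {
    assert_eq!(
        claim_slot(Some(PreDigest::Primary), Some(PreDigest::SecondaryPlain), true),
        Some(PreDigest::Primary)
    );
    assert_eq!(
        claim_slot(None, Some(PreDigest::SecondaryPlain), true),
        Some(PreDigest::SecondaryPlain)
    );
    assert_eq!(claim_slot(None, Some(PreDigest::SecondaryPlain), false), None);
}
```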
@@ -42,7 +44,7 @@ fn load_decode(backend: &B, key: &[u8]) -> ClientResult> T: Decode, { let corrupt = |e: codec::Error| { - ClientError::Backend(format!("BABE DB is corrupted. Decode error: {}", e.what())) + ClientError::Backend(format!("BABE DB is corrupted. Decode error: {}", e)) }; match backend.get_aux(key)? { None => Ok(None), @@ -149,7 +151,7 @@ mod test { #[test] fn load_decode_from_v0_epoch_changes() { let epoch = EpochV0 { - start_slot: 0, + start_slot: 0.into(), authorities: vec![], randomness: [0; 32], epoch_index: 1, @@ -193,8 +195,8 @@ mod test { .map(|(_, _, epoch)| epoch.clone()) .collect::>() == vec![PersistedEpochHeader::Regular(EpochHeader { - start_slot: 0, - end_slot: 100, + start_slot: 0.into(), + end_slot: 100.into(), })], ); // PersistedEpochHeader does not implement Debug, so we use assert! directly. diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 4705381c2b918..6ffa18c3cc3a4 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! # BABE (Blind Assignment for Blockchain Extension) //! @@ -59,15 +61,13 @@ //! blocks) and will go with the longest one in case of a tie. //! //! An in-depth description and analysis of the protocol can be found here: -//! +//! 
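The `load_decode` change above only swaps `e.what()` for the `Display` impl on `codec::Error`, but the surrounding pattern is worth spelling out: aux-db bytes are SCALE-decoded and a decode failure is reported as a corrupted-DB error. A small sketch of that pattern using `parity-scale-codec` directly, with the backend mocked as an `Option<Vec<u8>>`:

```rust
use codec::{Decode, Encode};

/// Decode optional aux-store bytes, mapping decode failures to a readable error
/// (simplified: a `String` stands in for the client error type).
fn load_decode<T: Decode>(raw: Option<Vec<u8>>) -> Result<Option<T>, String> {
    match raw {
        None => Ok(None),
        Some(bytes) => T::decode(&mut &bytes[..])
            .map(Some)
            .map_err(|e| format!("DB is corrupted. Decode error: {}", e)),
    }
}

fn main() {
    let stored = 42u64.encode();
    let value: Option<u64> = load_decode(Some(stored)).unwrap();
    assert_eq!(value, Some(42));
}
```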
#![forbid(unsafe_code)] #![warn(missing_docs)] pub use sp_consensus_babe::{ - BabeApi, ConsensusLog, BABE_ENGINE_ID, SlotNumber, - BabeEpochConfiguration, BabeGenesisConfiguration, - AuthorityId, AuthorityPair, AuthoritySignature, - BabeAuthorityWeight, VRF_OUTPUT_LENGTH, + BabeApi, ConsensusLog, BABE_ENGINE_ID, BabeEpochConfiguration, BabeGenesisConfiguration, + AuthorityId, AuthorityPair, AuthoritySignature, BabeAuthorityWeight, VRF_OUTPUT_LENGTH, digests::{ CompatibleDigestItem, NextEpochDescriptor, NextConfigDescriptor, PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, @@ -78,10 +78,7 @@ use std::{ collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, any::Any, borrow::Cow, convert::TryInto, }; -use sp_consensus::{ImportResult, CanAuthorWith}; -use sp_consensus::import_queue::{ - BoxJustificationImport, BoxFinalityProofImport, -}; +use sp_consensus::{ImportResult, CanAuthorWith, import_queue::BoxJustificationImport}; use sp_core::crypto::Public; use sp_application_crypto::AppKey; use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; @@ -94,16 +91,14 @@ use parking_lot::Mutex; use sp_inherents::{InherentDataProviders, InherentData}; use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG}; use sp_consensus::{ - self, BlockImport, Environment, Proposer, BlockCheckParams, + BlockImport, Environment, Proposer, BlockCheckParams, ForkChoiceStrategy, BlockImportParams, BlockOrigin, Error as ConsensusError, - SelectChain, SlotData, + SelectChain, SlotData, import_queue::{Verifier, BasicQueue, DefaultImportQueue, CacheKeyId}, }; use sp_consensus_babe::inherents::BabeInherentData; use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; -use sp_consensus::import_queue::{Verifier, BasicQueue, DefaultImportQueue, CacheKeyId}; use sc_client_api::{ - backend::AuxStore, - BlockchainEvents, ProvideUncles, + backend::AuxStore, BlockchainEvents, ProvideUncles, }; use sp_block_builder::BlockBuilder as BlockBuilderApi; use futures::channel::mpsc::{channel, Sender, Receiver}; @@ -114,6 +109,7 @@ use log::{debug, info, log, trace, warn}; use prometheus_endpoint::Registry; use sc_consensus_slots::{ SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, + BackoffAuthoringBlocksStrategy }; use sc_consensus_epochs::{ descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT, ViableEpochDescriptor, @@ -125,6 +121,7 @@ use sp_blockchain::{ use schnorrkel::SignatureError; use codec::{Encode, Decode}; use sp_api::ApiExt; +use sp_consensus_slots::Slot; mod verification; mod migration; @@ -140,9 +137,9 @@ pub struct Epoch { /// The epoch index. pub epoch_index: u64, /// The starting slot of the epoch. - pub start_slot: SlotNumber, + pub start_slot: Slot, /// The duration of this epoch. - pub duration: SlotNumber, + pub duration: u64, /// The authorities and their weights. pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// Randomness for this epoch. @@ -153,7 +150,7 @@ pub struct Epoch { impl EpochT for Epoch { type NextEpochDescriptor = (NextEpochDescriptor, BabeEpochConfiguration); - type SlotNumber = SlotNumber; + type Slot = Slot; fn increment( &self, @@ -169,11 +166,11 @@ impl EpochT for Epoch { } } - fn start_slot(&self) -> SlotNumber { + fn start_slot(&self) -> Slot { self.start_slot } - fn end_slot(&self) -> SlotNumber { + fn end_slot(&self) -> Slot { self.start_slot + self.duration } } @@ -183,11 +180,11 @@ impl Epoch { /// the first block, so that has to be provided. 
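Much of the churn in this file is mechanical: `SlotNumber` (a bare `u64`) becomes the `Slot` newtype from `sp-consensus-slots`, which is why slot literals turn into `0.into()` while durations stay `u64`. The sketch below is not the actual `sp_consensus_slots::Slot`, just a local stand-in showing why `0.into()`, `slot + 1` and `saturating_sub` keep working after the change:

```rust
use std::ops::Add;

/// Local stand-in for a slot newtype wrapping `u64`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Slot(u64);

impl From<u64> for Slot {
    fn from(n: u64) -> Self {
        Slot(n)
    }
}

/// Adding a plain `u64` (e.g. an epoch duration) yields another `Slot`.
impl Add<u64> for Slot {
    type Output = Slot;
    fn add(self, rhs: u64) -> Slot {
        Slot(self.0 + rhs)
    }
}

impl Slot {
    /// Distance to an earlier slot, saturating at zero.
    fn saturating_sub(self, rhs: Slot) -> u64 {
        self.0.saturating_sub(rhs.0)
    }
}

fn main() {
    let start: Slot = 0.into();
    let next = start + 1;
    assert_eq!(next, 1.into());
    assert_eq!(next.saturating_sub(start), 1);
    println!("{:?}", next);
}
```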
pub fn genesis( genesis_config: &BabeGenesisConfiguration, - slot_number: SlotNumber + slot: Slot, ) -> Epoch { Epoch { epoch_index: 0, - start_slot: slot_number, + start_slot: slot, duration: genesis_config.epoch_length, authorities: genesis_config.genesis_authorities.clone(), randomness: genesis_config.randomness, @@ -199,58 +196,86 @@ impl Epoch { } } +/// Errors encountered by the babe authorship task. #[derive(derive_more::Display, Debug)] -enum Error { +pub enum Error { + /// Multiple BABE pre-runtime digests #[display(fmt = "Multiple BABE pre-runtime digests, rejecting!")] MultiplePreRuntimeDigests, + /// No BABE pre-runtime digest found #[display(fmt = "No BABE pre-runtime digest found")] NoPreRuntimeDigest, + /// Multiple BABE epoch change digests #[display(fmt = "Multiple BABE epoch change digests, rejecting!")] MultipleEpochChangeDigests, + /// Multiple BABE config change digests #[display(fmt = "Multiple BABE config change digests, rejecting!")] MultipleConfigChangeDigests, + /// Could not extract timestamp and slot #[display(fmt = "Could not extract timestamp and slot: {:?}", _0)] Extraction(sp_consensus::Error), + /// Could not fetch epoch #[display(fmt = "Could not fetch epoch at {:?}", _0)] FetchEpoch(B::Hash), + /// Header rejected: too far in the future #[display(fmt = "Header {:?} rejected: too far in the future", _0)] TooFarInFuture(B::Hash), + /// Parent unavailable. Cannot import #[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)] ParentUnavailable(B::Hash, B::Hash), + /// Slot number must increase #[display(fmt = "Slot number must increase: parent slot: {}, this slot: {}", _0, _1)] - SlotNumberMustIncrease(u64, u64), + SlotMustIncrease(Slot, Slot), + /// Header has a bad seal #[display(fmt = "Header {:?} has a bad seal", _0)] HeaderBadSeal(B::Hash), + /// Header is unsealed #[display(fmt = "Header {:?} is unsealed", _0)] HeaderUnsealed(B::Hash), + /// Slot author not found #[display(fmt = "Slot author not found")] SlotAuthorNotFound, + /// Secondary slot assignments are disabled for the current epoch. #[display(fmt = "Secondary slot assignments are disabled for the current epoch.")] SecondarySlotAssignmentsDisabled, + /// Bad signature #[display(fmt = "Bad signature on {:?}", _0)] BadSignature(B::Hash), + /// Invalid author: Expected secondary author #[display(fmt = "Invalid author: Expected secondary author: {:?}, got: {:?}.", _0, _1)] InvalidAuthor(AuthorityId, AuthorityId), + /// No secondary author expected. #[display(fmt = "No secondary author expected.")] NoSecondaryAuthorExpected, + /// VRF verification of block by author failed #[display(fmt = "VRF verification of block by author {:?} failed: threshold {} exceeded", _0, _1)] VRFVerificationOfBlockFailed(AuthorityId, u128), + /// VRF verification failed #[display(fmt = "VRF verification failed: {:?}", _0)] VRFVerificationFailed(SignatureError), + /// Could not fetch parent header #[display(fmt = "Could not fetch parent header: {:?}", _0)] FetchParentHeader(sp_blockchain::Error), + /// Expected epoch change to happen. #[display(fmt = "Expected epoch change to happen at {:?}, s{}", _0, _1)] - ExpectedEpochChange(B::Hash, u64), + ExpectedEpochChange(B::Hash, Slot), + /// Unexpected config change. 
#[display(fmt = "Unexpected config change")] UnexpectedConfigChange, + /// Unexpected epoch change #[display(fmt = "Unexpected epoch change")] UnexpectedEpochChange, + /// Parent block has no associated weight #[display(fmt = "Parent block of {} has no associated weight", _0)] ParentBlockNoAssociatedWeight(B::Hash), #[display(fmt = "Checking inherents failed: {}", _0)] + /// Check Inherents error CheckInherents(String), + /// Client error Client(sp_blockchain::Error), + /// Runtime error Runtime(sp_inherents::Error), + /// Fork tree error ForkTree(Box>), } @@ -315,6 +340,11 @@ impl Config { } } } + + /// Get the inner slot duration, in milliseconds. + pub fn slot_duration(&self) -> u64 { + self.0.slot_duration() + } } impl std::ops::Deref for Config { @@ -326,7 +356,7 @@ impl std::ops::Deref for Config { } /// Parameters for BABE. -pub struct BabeParams { +pub struct BabeParams { /// The keystore that manages the keys of the node. pub keystore: SyncCryptoStorePtr, @@ -353,6 +383,9 @@ pub struct BabeParams { /// Force authoring of blocks even if we are offline pub force_authoring: bool, + /// Strategy and parameters for backing off block production. + pub backoff_authoring_blocks: Option, + /// The source of timestamps for relative slots pub babe_link: BabeLink, @@ -361,7 +394,7 @@ pub struct BabeParams { } /// Start the babe worker. -pub fn start_babe(BabeParams { +pub fn start_babe(BabeParams { keystore, client, select_chain, @@ -370,9 +403,10 @@ pub fn start_babe(BabeParams { sync_oracle, inherent_data_providers, force_authoring, + backoff_authoring_blocks, babe_link, can_author_with, -}: BabeParams) -> Result< +}: BabeParams) -> Result< BabeWorker, sp_consensus::Error, > where @@ -388,6 +422,7 @@ pub fn start_babe(BabeParams { Error: std::error::Error + Send + From + From + 'static, SO: SyncOracle + Send + Sync + Clone + 'static, CAW: CanAuthorWith + Send + 'static, + BS: BackoffAuthoringBlocksStrategy> + Send + 'static, { let config = babe_link.config; let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); @@ -398,6 +433,7 @@ pub fn start_babe(BabeParams { env, sync_oracle: sync_oracle.clone(), force_authoring, + backoff_authoring_blocks, keystore, epoch_changes: babe_link.epoch_changes.clone(), slot_notification_sinks: slot_notification_sinks.clone(), @@ -431,7 +467,7 @@ pub fn start_babe(BabeParams { #[must_use] pub struct BabeWorker { inner: Pin + Send + 'static>>, - slot_notification_sinks: Arc, Epoch>)>>>>, + slot_notification_sinks: SlotNotificationSinks, } impl BabeWorker { @@ -439,7 +475,7 @@ impl BabeWorker { /// epoch descriptor. pub fn slot_notification_stream( &self - ) -> Receiver<(u64, ViableEpochDescriptor, Epoch>)> { + ) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> { const CHANNEL_BUFFER_SIZE: usize = 1024; let (sink, stream) = channel(CHANNEL_BUFFER_SIZE); @@ -460,21 +496,26 @@ impl futures::Future for BabeWorker { } /// Slot notification sinks. 
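`BabeParams` now carries `backoff_authoring_blocks: Option<BS>` with `BS: BackoffAuthoringBlocksStrategy<NumberFor<B>>`. Conceptually the strategy answers one question per slot: given the best block, the finalized block and the current slot, should this node skip authoring? The sketch below mirrors that call shape with plain `u64`s and a finalized-head-lagging rule loosely modelled on `BackoffAuthoringOnFinalizedHeadLagging`; it is illustrative, not the real trait:

```rust
/// Simplified stand-in for the backoff strategy trait (block numbers and slots
/// are plain u64 here).
trait BackoffAuthoringBlocksStrategy {
    fn should_backoff(
        &self,
        chain_head_number: u64,
        chain_head_slot: u64,
        finalized_number: u64,
        slot_now: u64,
    ) -> bool;
}

/// Back off once the best block is too far ahead of the finalized block.
struct FinalizedHeadLagging {
    max_interval: u64,
}

impl BackoffAuthoringBlocksStrategy for FinalizedHeadLagging {
    fn should_backoff(
        &self,
        chain_head_number: u64,
        _chain_head_slot: u64,
        finalized_number: u64,
        _slot_now: u64,
    ) -> bool {
        // Skip authoring when the unfinalized distance exceeds the limit.
        chain_head_number.saturating_sub(finalized_number) > self.max_interval
    }
}

fn main() {
    let strategy = FinalizedHeadLagging { max_interval: 32 };
    assert!(!strategy.should_backoff(10, 10, 0, 11));
    assert!(strategy.should_backoff(100, 100, 0, 101));
    println!("backoff sketch ok");
}
```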
-type SlotNotificationSinks = Arc::Hash, NumberFor, Epoch>)>>>>; +type SlotNotificationSinks = Arc< + Mutex::Hash, NumberFor, Epoch>)>>> +>; -struct BabeSlotWorker { +struct BabeSlotWorker { client: Arc, block_import: Arc>, env: E, sync_oracle: SO, force_authoring: bool, + backoff_authoring_blocks: Option, keystore: SyncCryptoStorePtr, epoch_changes: SharedEpochChanges, slot_notification_sinks: SlotNotificationSinks, config: Config, } -impl sc_consensus_slots::SimpleSlotWorker for BabeSlotWorker where +impl sc_consensus_slots::SimpleSlotWorker + for BabeSlotWorker +where B: BlockT, C: ProvideRuntimeApi + ProvideCache + @@ -485,6 +526,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot E::Proposer: Proposer>, I: BlockImport> + Send + Sync + 'static, SO: SyncOracle + Send + Clone, + BS: BackoffAuthoringBlocksStrategy>, Error: std::error::Error + Send + From + From + 'static, { type EpochData = ViableEpochDescriptor, Epoch>; @@ -507,13 +549,13 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot fn epoch_data( &self, parent: &B::Header, - slot_number: u64, + slot: Slot, ) -> Result { self.epoch_changes.lock().epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent.hash(), parent.number().clone(), - slot_number, + slot, ) .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) @@ -528,12 +570,12 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot fn claim_slot( &self, _parent_header: &B::Header, - slot_number: SlotNumber, + slot: Slot, epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) -> Option { - debug!(target: "babe", "Attempting to claim slot {}", slot_number); + debug!(target: "babe", "Attempting to claim slot {}", slot); let s = authorship::claim_slot( - slot_number, + slot, self.epoch_changes.lock().viable_epoch( &epoch_descriptor, |slot| Epoch::genesis(&self.config, slot) @@ -542,7 +584,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot ); if s.is_some() { - debug!(target: "babe", "Claimed slot {}", slot_number); + debug!(target: "babe", "Claimed slot {}", slot); } s @@ -551,12 +593,12 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot fn notify_slot( &self, _parent_header: &B::Header, - slot_number: SlotNumber, + slot: Slot, epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) { self.slot_notification_sinks.lock() .retain_mut(|sink| { - match sink.try_send((slot_number, epoch_descriptor.clone())) { + match sink.try_send((slot, epoch_descriptor.clone())) { Ok(()) => true, Err(e) => { if e.is_full() { @@ -572,7 +614,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot fn pre_digest_data( &self, - _slot_number: u64, + _slot: Slot, claim: &Self::Claim, ) -> Vec> { vec![ @@ -629,6 +671,23 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot self.force_authoring } + fn should_backoff(&self, slot: Slot, chain_head: &B::Header) -> bool { + if let Some(ref strategy) = self.backoff_authoring_blocks { + if let Ok(chain_head_slot) = find_pre_digest::(chain_head) + .map(|digest| digest.slot()) + { + return strategy.should_backoff( + *chain_head.number(), + chain_head_slot, + self.client.info().finalized_number, + slot, + self.logging_target(), + ); + } + } + false + } + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { &mut self.sync_oracle } @@ -641,22 +700,28 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot fn proposing_remaining_duration( &self, - head: &B::Header, + parent_head: &B::Header, slot_info: &SlotInfo, ) -> Option { let slot_remaining = 
self.slot_remaining_duration(slot_info); - let parent_slot = match find_pre_digest::(head) { + // If parent is genesis block, we don't require any lenience factor. + if parent_head.number().is_zero() { + return Some(slot_remaining) + } + + let parent_slot = match find_pre_digest::(parent_head) { Err(_) => return Some(slot_remaining), - Ok(d) => d.slot_number(), + Ok(d) => d.slot(), }; if let Some(slot_lenience) = sc_consensus_slots::slot_lenience_exponential(parent_slot, slot_info) { - debug!(target: "babe", + debug!( + target: "babe", "No block for {} slots. Applying exponential lenience of {}s", - slot_info.number.saturating_sub(parent_slot + 1), + slot_info.slot.saturating_sub(parent_slot + 1), slot_lenience.as_secs(), ); @@ -669,13 +734,12 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot /// Extract the BABE pre digest from the given header. Pre-runtime digests are /// mandatory, the function will return `Err` if none is found. -fn find_pre_digest(header: &B::Header) -> Result> -{ +pub fn find_pre_digest(header: &B::Header) -> Result> { // genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code if header.number().is_zero() { return Ok(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { - slot_number: 0, + slot: 0.into(), authority_index: 0, })); } @@ -737,7 +801,7 @@ impl SlotCompatible for TimeSource { fn extract_timestamp_and_slot( &self, data: &InherentData, - ) -> Result<(TimestampInherent, u64, std::time::Duration), sp_consensus::Error> { + ) -> Result<(TimestampInherent, Slot, std::time::Duration), sp_consensus::Error> { trace!(target: "babe", "extract timestamp"); data.timestamp_inherent_data() .and_then(|t| data.babe_inherent_data().map(|a| (t, a))) @@ -822,8 +886,8 @@ where fn check_and_report_equivocation( &self, - slot_now: SlotNumber, - slot: SlotNumber, + slot_now: Slot, + slot: Slot, header: &Block::Header, author: &AuthorityId, origin: &BlockOrigin, @@ -948,7 +1012,7 @@ where descendent_query(&*self.client), &parent_hash, parent_header_metadata.number, - pre_digest.slot_number(), + pre_digest.slot(), ) .map_err(|e| Error::::ForkTree(Box::new(e)))? .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; @@ -970,14 +1034,14 @@ where CheckedHeader::Checked(pre_header, verified_info) => { let babe_pre_digest = verified_info.pre_digest.as_babe_pre_digest() .expect("check_header always returns a pre-digest digest item; qed"); - let slot_number = babe_pre_digest.slot_number(); + let slot = babe_pre_digest.slot(); // the header is valid but let's check if there was something else already // proposed at the same slot by the given author. if there was, we will // report the equivocation to the runtime. if let Err(err) = self.check_and_report_equivocation( slot_now, - slot_number, + slot, &header, &verified_info.author, &origin, @@ -989,7 +1053,7 @@ where // to check that the internally-set timestamp in the inherents // actually matches the slot set in the seal. 
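The reworked `proposing_remaining_duration` above grants extra proposing time via `sc_consensus_slots::slot_lenience_exponential` when slots have gone empty. The exact constants and cap live in `sc-consensus-slots`; the sketch below only illustrates the general idea (lenience grows with the number of skipped slots, up to a cap) and its formula is an assumption, not the library's:

```rust
use std::time::Duration;

/// Illustrative exponential lenience: double the extra time per skipped slot,
/// capped so it stays bounded. Not the exact formula used by sc-consensus-slots.
fn slot_lenience_exponential(parent_slot: u64, current_slot: u64, slot_duration_ms: u64) -> Option<Duration> {
    // Slots skipped since the parent block (the parent's own slot doesn't count).
    let skipped = current_slot.saturating_sub(parent_slot + 1);
    if skipped == 0 {
        return None;
    }
    const MAX_DOUBLINGS: u64 = 7;
    let factor = 2u64.pow(skipped.min(MAX_DOUBLINGS) as u32);
    Some(Duration::from_millis(slot_duration_ms * factor))
}

fn main() {
    println!("{:?}", slot_lenience_exponential(10, 11, 3000)); // None: no slot was skipped
    println!("{:?}", slot_lenience_exponential(10, 14, 3000)); // Some(_), grows with the gap
}
```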
if let Some(inner_body) = body.take() { - inherent_data.babe_replace_inherent_data(slot_number); + inherent_data.babe_replace_inherent_data(slot); let block = Block::new(pre_header.clone(), inner_body); self.check_inherents( @@ -1119,7 +1183,7 @@ impl BlockImport for BabeBlockImport(&block.header) .expect("valid babe headers must contain a predigest; \ header has been already verified; qed"); - let slot_number = pre_digest.slot_number(); + let slot = pre_digest.slot(); let parent_hash = *block.header.parent_hash(); let parent_header = self.client.header(BlockId::Hash(parent_hash)) @@ -1129,15 +1193,15 @@ impl BlockImport for BabeBlockImport(&parent_header) - .map(|d| d.slot_number()) + .map(|d| d.slot()) .expect("parent is non-genesis; valid BABE headers contain a pre-digest; \ header has already been verified; qed"); // make sure that slot number is strictly increasing - if slot_number <= parent_slot { + if slot <= parent_slot { return Err( ConsensusError::ClientImport(babe_err( - Error::::SlotNumberMustIncrease(parent_slot, slot_number) + Error::::SlotMustIncrease(parent_slot, slot) ).into()) ); } @@ -1190,7 +1254,7 @@ impl BlockImport for BabeBlockImport { return Err( ConsensusError::ClientImport( - babe_err(Error::::ExpectedEpochChange(hash, slot_number)).into(), + babe_err(Error::::ExpectedEpochChange(hash, slot)).into(), ) ) }, @@ -1235,7 +1299,7 @@ impl BlockImport for BabeBlockImport= start slot {}).", viable_epoch.as_ref().epoch_index, hash, - slot_number, + slot, viable_epoch.as_ref().start_slot, ); @@ -1360,7 +1424,7 @@ fn prune_finalized( find_pre_digest::(&finalized_header) .expect("finalized header must be valid; \ valid blocks have a pre-digest; qed") - .slot_number() + .slot() }; epoch_changes.prune_finalized( @@ -1423,7 +1487,6 @@ pub fn import_queue( babe_link: BabeLink, block_import: Inner, justification_import: Option>, - finality_proof_import: Option>, client: Arc, select_chain: SelectChain, inherent_data_providers: InherentDataProviders, @@ -1455,7 +1518,6 @@ pub fn import_queue( verifier, Box::new(block_import), justification_import, - finality_proof_import, spawner, registry, )) @@ -1469,7 +1531,7 @@ pub mod test_helpers { /// Try to claim the given slot and return a `BabePreDigest` if /// successful. pub fn claim_slot( - slot_number: u64, + slot: Slot, parent: &B::Header, client: &C, keystore: SyncCryptoStorePtr, @@ -1487,12 +1549,12 @@ pub mod test_helpers { descendent_query(client), &parent.hash(), parent.number().clone(), - slot_number, + slot, |slot| Epoch::genesis(&link.config, slot), ).unwrap().unwrap(); authorship::claim_slot( - slot_number, + slot, &epoch, &keystore, ).map(|(digest, _)| digest) diff --git a/client/consensus/babe/src/migration.rs b/client/consensus/babe/src/migration.rs index 2a5a8749cc3c1..fec73667da48d 100644 --- a/client/consensus/babe/src/migration.rs +++ b/client/consensus/babe/src/migration.rs @@ -1,9 +1,28 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use codec::{Encode, Decode}; use sc_consensus_epochs::Epoch as EpochT; use crate::{ - Epoch, SlotNumber, AuthorityId, BabeAuthorityWeight, BabeGenesisConfiguration, + Epoch, AuthorityId, BabeAuthorityWeight, BabeGenesisConfiguration, BabeEpochConfiguration, VRF_OUTPUT_LENGTH, NextEpochDescriptor, }; +use sp_consensus_slots::Slot; /// BABE epoch information, version 0. #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)] @@ -11,9 +30,9 @@ pub struct EpochV0 { /// The epoch index. pub epoch_index: u64, /// The starting slot of the epoch. - pub start_slot: SlotNumber, + pub start_slot: Slot, /// The duration of this epoch. - pub duration: SlotNumber, + pub duration: u64, /// The authorities and their weights. pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// Randomness for this epoch. @@ -22,7 +41,7 @@ pub struct EpochV0 { impl EpochT for EpochV0 { type NextEpochDescriptor = NextEpochDescriptor; - type SlotNumber = SlotNumber; + type Slot = Slot; fn increment( &self, @@ -37,11 +56,11 @@ impl EpochT for EpochV0 { } } - fn start_slot(&self) -> SlotNumber { + fn start_slot(&self) -> Slot { self.start_slot } - fn end_slot(&self) -> SlotNumber { + fn end_slot(&self) -> Slot { self.start_slot + self.duration } } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 6b0f5870ba53d..9d03a3266d615 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! 
BABE testsuite @@ -26,21 +28,16 @@ use sp_keystore::{ SyncCryptoStore, vrf::make_transcript as transcript_from_data, }; -use sp_consensus_babe::{ - AuthorityPair, - SlotNumber, - AllowedSlots, - make_transcript, - make_transcript_data, -}; +use sp_consensus_babe::{AuthorityPair, Slot, AllowedSlots, make_transcript, make_transcript_data}; +use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sp_consensus::{ NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor, - import_queue::{BoxBlockImport, BoxJustificationImport, BoxFinalityProofImport}, + import_queue::{BoxBlockImport, BoxJustificationImport}, }; use sc_network_test::*; use sc_network_test::{Block as TestBlock, PeersClient}; -use sc_network::config::{BoxFinalityProofRequestBuilder, ProtocolConfig}; +use sc_network::config::ProtocolConfig; use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; use sc_client_api::{BlockchainEvents, backend::TransactionFor}; use log::debug; @@ -84,7 +81,7 @@ struct DummyProposer { factory: DummyFactory, parent_hash: Hash, parent_number: u64, - parent_slot: SlotNumber, + parent_slot: Slot, } impl Environment for DummyFactory { @@ -98,7 +95,7 @@ impl Environment for DummyFactory { let parent_slot = crate::find_pre_digest::(parent_header) .expect("parent header has a pre-digest") - .slot_number(); + .slot(); future::ready(Ok(DummyProposer { factory: self.clone(), @@ -134,7 +131,7 @@ impl DummyProposer { let this_slot = crate::find_pre_digest::(block.header()) .expect("baked block has valid pre-digest") - .slot_number(); + .slot(); // figure out if we should add a consensus digest, since the test runtime // doesn't. @@ -271,8 +268,6 @@ impl TestNetFactory for BabeTestNet { -> ( BlockImportAdapter, Option>, - Option>, - Option>, Option, ) { @@ -294,8 +289,6 @@ impl TestNetFactory for BabeTestNet { ( BlockImportAdapter::new_full(block_import), None, - None, - None, Some(PeerData { link, inherent_data_providers, block_import: data_block_import }), ) } @@ -434,6 +427,7 @@ fn run_one_test( sync_oracle: DummyOracle, inherent_data_providers: data.inherent_data_providers.clone(), force_authoring: false, + backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), babe_link: data.link.clone(), keystore, can_author_with: sp_consensus::AlwaysCanAuthor, @@ -529,7 +523,7 @@ fn can_author_block() { let mut i = 0; let epoch = Epoch { - start_slot: 0, + start_slot: 0.into(), authorities: vec![(public.into(), 1)], randomness: [0; 32], epoch_index: 1, @@ -550,7 +544,7 @@ fn can_author_block() { }; // with secondary slots enabled it should never be empty - match claim_slot(i, &epoch, &keystore) { + match claim_slot(i.into(), &epoch, &keystore) { None => i += 1, Some(s) => debug!(target: "babe", "Authored block {:?}", s.0), } @@ -559,7 +553,7 @@ fn can_author_block() { // of times. config.allowed_slots = AllowedSlots::PrimarySlots; loop { - match claim_slot(i, &epoch, &keystore) { + match claim_slot(i.into(), &epoch, &keystore) { None => i += 1, Some(s) => { debug!(target: "babe", "Authored block {:?}", s.0); @@ -572,15 +566,15 @@ fn can_author_block() { // Propose and import a new BABE block on top of the given parent. 
fn propose_and_import_block( parent: &TestHeader, - slot_number: Option, + slot: Option, proposer_factory: &mut DummyFactory, block_import: &mut BoxBlockImport, ) -> sp_core::H256 { let mut proposer = futures::executor::block_on(proposer_factory.init(parent)).unwrap(); - let slot_number = slot_number.unwrap_or_else(|| { + let slot = slot.unwrap_or_else(|| { let parent_pre_digest = find_pre_digest::(parent).unwrap(); - parent_pre_digest.slot_number() + 1 + parent_pre_digest.slot() + 1 }); let pre_digest = sp_runtime::generic::Digest { @@ -588,7 +582,7 @@ fn propose_and_import_block( Item::babe_pre_digest( PreDigest::SecondaryPlain(SecondaryPlainPreDigest { authority_index: 0, - slot_number, + slot, }), ), ], @@ -602,7 +596,7 @@ fn propose_and_import_block( descendent_query(&*proposer_factory.client), &parent_hash, *parent.number(), - slot_number, + slot, ).unwrap().unwrap(); let seal = { @@ -660,19 +654,19 @@ fn importing_block_one_sets_genesis_epoch() { let block_hash = propose_and_import_block( &genesis_header, - Some(999), + Some(999.into()), &mut proposer_factory, &mut block_import, ); - let genesis_epoch = Epoch::genesis(&data.link.config, 999); + let genesis_epoch = Epoch::genesis(&data.link.config, 999.into()); let epoch_changes = data.link.epoch_changes.lock(); let epoch_for_second_block = epoch_changes.epoch_data_for_child_of( descendent_query(&*client), &block_hash, 1, - 1000, + 1000.into(), |slot| Epoch::genesis(&data.link.config, slot), ).unwrap().unwrap(); @@ -809,7 +803,7 @@ fn verify_slots_are_strictly_increasing() { // we should have no issue importing this block let b1 = propose_and_import_block( &genesis_header, - Some(999), + Some(999.into()), &mut proposer_factory, &mut block_import, ); @@ -820,7 +814,7 @@ fn verify_slots_are_strictly_increasing() { // we will panic due to the `PanickingBlockImport` defined above. propose_and_import_block( &b1, - Some(999), + Some(999.into()), &mut proposer_factory, &mut block_import, ); @@ -836,7 +830,7 @@ fn babe_transcript_generation_match() { .expect("Generates authority pair"); let epoch = Epoch { - start_slot: 0, + start_slot: 0.into(), authorities: vec![(public.into(), 1)], randomness: [0; 32], epoch_index: 1, @@ -847,8 +841,8 @@ fn babe_transcript_generation_match() { }, }; - let orig_transcript = make_transcript(&epoch.randomness.clone(), 1, epoch.epoch_index); - let new_transcript = make_transcript_data(&epoch.randomness, 1, epoch.epoch_index); + let orig_transcript = make_transcript(&epoch.randomness.clone(), 1.into(), epoch.epoch_index); + let new_transcript = make_transcript_data(&epoch.randomness, 1.into(), epoch.epoch_index); let test = |t: merlin::Transcript| -> [u8; 16] { let mut b = [0u8; 16]; diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index fd3c27be4f34e..53dfd9ed10cee 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -1,28 +1,31 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
-// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Verification for BABE headers. use sp_runtime::{traits::Header, traits::DigestItemFor}; use sp_core::{Pair, Public}; -use sp_consensus_babe::{make_transcript, AuthoritySignature, SlotNumber, AuthorityPair, AuthorityId}; +use sp_consensus_babe::{make_transcript, AuthoritySignature, AuthorityPair, AuthorityId}; use sp_consensus_babe::digests::{ PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest, CompatibleDigestItem }; use sc_consensus_slots::CheckedHeader; +use sp_consensus_slots::Slot; use log::{debug, trace}; use super::{find_pre_digest, babe_err, Epoch, BlockT, Error}; use super::authorship::{calculate_primary_threshold, check_primary_threshold, secondary_slot_author}; @@ -36,7 +39,7 @@ pub(super) struct VerificationParams<'a, B: 'a + BlockT> { /// work. pub(super) pre_digest: Option, /// The slot number of the current time. - pub(super) slot_now: SlotNumber, + pub(super) slot_now: Slot, /// Epoch descriptor of the epoch this block _should_ be under, if it's valid. pub(super) epoch: &'a Epoch, } @@ -81,9 +84,9 @@ pub(super) fn check_header( // and that's what we sign let pre_hash = header.hash(); - if pre_digest.slot_number() > slot_now { + if pre_digest.slot() > slot_now { header.digest_mut().push(seal); - return Ok(CheckedHeader::Deferred(header, pre_digest.slot_number())); + return Ok(CheckedHeader::Deferred(header, pre_digest.slot())); } let author = match authorities.get(pre_digest.authority_index() as usize) { @@ -93,7 +96,11 @@ pub(super) fn check_header( match &pre_digest { PreDigest::Primary(primary) => { - debug!(target: "babe", "Verifying Primary block"); + debug!(target: "babe", + "Verifying primary block #{} at slot: {}", + header.number(), + primary.slot, + ); check_primary_header::( pre_hash, @@ -104,7 +111,12 @@ pub(super) fn check_header( )?; }, PreDigest::SecondaryPlain(secondary) if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() => { - debug!(target: "babe", "Verifying Secondary plain block"); + debug!(target: "babe", + "Verifying secondary plain block #{} at slot: {}", + header.number(), + secondary.slot, + ); + check_secondary_plain_header::( pre_hash, secondary, @@ -113,7 +125,12 @@ pub(super) fn check_header( )?; }, PreDigest::SecondaryVRF(secondary) if epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() => { - debug!(target: "babe", "Verifying Secondary VRF block"); + debug!(target: "babe", + "Verifying secondary VRF block #{} at slot: {}", + header.number(), + secondary.slot, + ); + check_secondary_vrf_header::( pre_hash, secondary, @@ -157,7 +174,7 @@ fn check_primary_header( let (inout, _) = { let transcript = make_transcript( &epoch.randomness, - pre_digest.slot_number, + pre_digest.slot, epoch.epoch_index, ); @@ -197,7 +214,7 @@ fn check_secondary_plain_header( // check the signature is valid under the expected authority and // chain state. 
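One rule in `check_header` touched above is worth calling out alongside the logging changes: a header whose pre-digest claims a slot later than `slot_now` is neither accepted nor rejected, it is handed back as `CheckedHeader::Deferred` so it can be re-checked once that slot arrives. A minimal sketch of just that branch, with local stand-in types:

```rust
/// Simplified stand-in for the slot check result.
#[derive(Debug)]
enum CheckedHeader<H> {
    /// Header is from a future slot; retry verification at that slot.
    Deferred(H, u64),
    /// Header passed the slot check.
    Checked(H),
}

fn check_header_slot(header: String, header_slot: u64, slot_now: u64) -> CheckedHeader<String> {
    if header_slot > slot_now {
        CheckedHeader::Deferred(header, header_slot)
    } else {
        CheckedHeader::Checked(header)
    }
}

fn main() {
    println!("{:?}", check_header_slot("h1".into(), 12, 10)); // Deferred
    println!("{:?}", check_header_slot("h2".into(), 9, 10));  // Checked
}
```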
let expected_author = secondary_slot_author( - pre_digest.slot_number, + pre_digest.slot, &epoch.authorities, epoch.randomness, ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; @@ -225,7 +242,7 @@ fn check_secondary_vrf_header( // check the signature is valid under the expected authority and // chain state. let expected_author = secondary_slot_author( - pre_digest.slot_number, + pre_digest.slot, &epoch.authorities, epoch.randomness, ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; @@ -239,7 +256,7 @@ fn check_secondary_vrf_header( if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { let transcript = make_transcript( &epoch.randomness, - pre_digest.slot_number, + pre_digest.slot, epoch.epoch_index, ); diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 0a8a4c43d7117..41c42866e7272 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sc-client-api = { version = "3.0.0", path = "../../api" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index 1d9b072cfe964..a53517c5c35ea 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Collection of common consensus specific implementations mod longest_chain; diff --git a/client/consensus/common/src/longest_chain.rs b/client/consensus/common/src/longest_chain.rs index 981dbad0f6070..8cf32a1dbd3c1 100644 --- a/client/consensus/common/src/longest_chain.rs +++ b/client/consensus/common/src/longest_chain.rs @@ -1,18 +1,21 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. 
// This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . + //! Longest chain implementation use std::sync::Arc; @@ -98,4 +101,4 @@ impl SelectChain for LongestChain self.backend.blockchain().best_containing(target_hash, maybe_max_number, import_lock) .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) } -} \ No newline at end of file +} diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index d50ec29ed9c6e..bebe6979e694e 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-epochs" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Generic epochs-based utilities for consensus" edition = "2018" @@ -13,9 +13,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -parking_lot = "0.10.0" -fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } -sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0"} -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sc-client-api = { path = "../../api" , version = "2.0.0"} +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +parking_lot = "0.11.1" +fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } +sp-runtime = { path = "../../../primitives/runtime" , version = "3.0.0"} +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sc-client-api = { path = "../../api" , version = "3.0.0"} diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index acb07dd668a3c..5c5ef446993a2 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
-// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Generic utilities for epoch-based consensus engines. @@ -76,13 +78,13 @@ pub trait Epoch { /// Descriptor for the next epoch. type NextEpochDescriptor; /// Type of the slot number. - type SlotNumber: Ord + Copy; + type Slot: Ord + Copy; /// The starting slot of the epoch. - fn start_slot(&self) -> Self::SlotNumber; + fn start_slot(&self) -> Self::Slot; /// Produce the "end slot" of the epoch. This is NOT inclusive to the epoch, /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. - fn end_slot(&self) -> Self::SlotNumber; + fn end_slot(&self) -> Self::Slot; /// Increment the epoch data, using the next epoch descriptor. fn increment(&self, descriptor: Self::NextEpochDescriptor) -> Self; } @@ -100,10 +102,10 @@ impl<'a, E: Epoch> From<&'a E> for EpochHeader { #[derive(Eq, PartialEq, Encode, Decode, Debug)] pub struct EpochHeader { /// The starting slot of the epoch. - pub start_slot: E::SlotNumber, + pub start_slot: E::Slot, /// The end slot of the epoch. This is NOT inclusive to the epoch, /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. - pub end_slot: E::SlotNumber, + pub end_slot: E::Slot, } impl Clone for EpochHeader { @@ -213,14 +215,14 @@ impl ViableEpoch where #[derive(PartialEq, Eq, Clone, Debug)] pub enum ViableEpochDescriptor { /// The epoch is an unimported genesis, with given start slot number. - UnimportedGenesis(E::SlotNumber), + UnimportedGenesis(E::Slot), /// The epoch is signaled and has been imported, with given identifier and header. Signaled(EpochIdentifier, EpochHeader) } impl ViableEpochDescriptor { /// Start slot of the descriptor. - pub fn start_slot(&self) -> E::SlotNumber { + pub fn start_slot(&self) -> E::Slot { match self { Self::UnimportedGenesis(start_slot) => *start_slot, Self::Signaled(_, header) => header.start_slot, @@ -337,7 +339,7 @@ impl EpochChanges where /// Map the epoch changes from one storing data to a different one. 
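The generic `Epoch` trait in `sc-consensus-epochs` renames its associated type from `SlotNumber` to `Slot`; its contract is unchanged: an epoch covers the half-open slot range `start_slot() .. end_slot()`. A compact, self-contained sketch of that contract (a local simplified trait, mirroring the test `Epoch` further down in this file):

```rust
/// Simplified local version of the epochs trait: only the slot-range contract.
trait Epoch {
    type Slot: Ord + Copy;
    fn start_slot(&self) -> Self::Slot;
    /// Exclusive end: the epoch covers `start_slot() .. end_slot()`.
    fn end_slot(&self) -> Self::Slot;
}

struct SimpleEpoch {
    start_slot: u64,
    duration: u64,
}

impl Epoch for SimpleEpoch {
    type Slot = u64;
    fn start_slot(&self) -> u64 {
        self.start_slot
    }
    fn end_slot(&self) -> u64 {
        self.start_slot + self.duration
    }
}

fn contains(epoch: &impl Epoch<Slot = u64>, slot: u64) -> bool {
    epoch.start_slot() <= slot && slot < epoch.end_slot()
}

fn main() {
    let epoch = SimpleEpoch { start_slot: 100, duration: 20 };
    assert!(contains(&epoch, 100));
    assert!(contains(&epoch, 119));
    assert!(!contains(&epoch, 120)); // the end slot is exclusive
    println!("half-open epoch range ok");
}
```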
pub fn map(self, mut f: F) -> EpochChanges where - B: Epoch, + B: Epoch, F: FnMut(&Hash, &Number, E) -> B, { EpochChanges { @@ -392,7 +394,7 @@ impl EpochChanges where descendent_of_builder: D, hash: &Hash, number: Number, - slot: E::SlotNumber, + slot: E::Slot, ) -> Result<(), fork_tree::Error> { let is_descendent_of = descendent_of_builder .build_is_descendent_of(None); @@ -443,11 +445,11 @@ impl EpochChanges where descriptor: &ViableEpochDescriptor, make_genesis: G, ) -> Option> where - G: FnOnce(E::SlotNumber) -> E + G: FnOnce(E::Slot) -> E { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) + ViableEpochDescriptor::UnimportedGenesis(slot) => { + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))) }, ViableEpochDescriptor::Signaled(identifier, _) => { self.epoch(&identifier).map(ViableEpoch::Signaled) @@ -477,11 +479,11 @@ impl EpochChanges where descriptor: &ViableEpochDescriptor, make_genesis: G, ) -> Option> where - G: FnOnce(E::SlotNumber) -> E + G: FnOnce(E::Slot) -> E { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) + ViableEpochDescriptor::UnimportedGenesis(slot) => { + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))) }, ViableEpochDescriptor::Signaled(identifier, _) => { self.epoch_mut(&identifier).map(ViableEpoch::Signaled) @@ -498,12 +500,12 @@ impl EpochChanges where descriptor: &ViableEpochDescriptor, make_genesis: G ) -> Option where - G: FnOnce(E::SlotNumber) -> E, + G: FnOnce(E::Slot) -> E, E: Clone, { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - Some(make_genesis(*slot_number)) + ViableEpochDescriptor::UnimportedGenesis(slot) => { + Some(make_genesis(*slot)) }, ViableEpochDescriptor::Signaled(identifier, _) => { self.epoch(&identifier).cloned() @@ -521,17 +523,17 @@ impl EpochChanges where descendent_of_builder: D, parent_hash: &Hash, parent_number: Number, - slot_number: E::SlotNumber, + slot: E::Slot, make_genesis: G, ) -> Result, fork_tree::Error> where - G: FnOnce(E::SlotNumber) -> E, + G: FnOnce(E::Slot) -> E, E: Clone, { let descriptor = self.epoch_descriptor_for_child_of( descendent_of_builder, parent_hash, parent_number, - slot_number + slot )?; Ok(descriptor.and_then(|des| self.epoch_data(&des, make_genesis))) @@ -546,7 +548,7 @@ impl EpochChanges where descendent_of_builder: D, parent_hash: &Hash, parent_number: Number, - slot_number: E::SlotNumber, + slot: E::Slot, ) -> Result>, fork_tree::Error> { // find_node_where will give you the node in the fork-tree which is an ancestor // of the `parent_hash` by default. if the last epoch was signalled at the parent_hash, @@ -559,7 +561,7 @@ impl EpochChanges where if parent_number == Zero::zero() { // need to insert the genesis epoch. - return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot_number))) + return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot))) } // We want to find the deepest node in the tree which is an ancestor @@ -569,9 +571,9 @@ impl EpochChanges where // we need. 
let predicate = |epoch: &PersistedEpochHeader| match *epoch { PersistedEpochHeader::Genesis(ref epoch_0, _) => - epoch_0.start_slot <= slot_number, + epoch_0.start_slot <= slot, PersistedEpochHeader::Regular(ref epoch_n) => - epoch_n.start_slot <= slot_number, + epoch_n.start_slot <= slot, }; self.inner.find_node_where( @@ -586,7 +588,7 @@ impl EpochChanges where // and here we figure out which of the internal epochs // of a genesis node to use based on their start slot. PersistedEpochHeader::Genesis(ref epoch_0, ref epoch_1) => - if epoch_1.start_slot <= slot_number { + if epoch_1.start_slot <= slot { (EpochIdentifierPosition::Genesis1, epoch_1.clone()) } else { (EpochIdentifierPosition::Genesis0, epoch_0.clone()) @@ -693,17 +695,17 @@ mod tests { } type Hash = [u8; 1]; - type SlotNumber = u64; + type Slot = u64; #[derive(Debug, Clone, Eq, PartialEq)] struct Epoch { - start_slot: SlotNumber, - duration: SlotNumber, + start_slot: Slot, + duration: Slot, } impl EpochT for Epoch { type NextEpochDescriptor = (); - type SlotNumber = SlotNumber; + type Slot = Slot; fn increment(&self, _: ()) -> Self { Epoch { @@ -712,11 +714,11 @@ mod tests { } } - fn end_slot(&self) -> SlotNumber { + fn end_slot(&self) -> Slot { self.start_slot + self.duration } - fn start_slot(&self) -> SlotNumber { + fn start_slot(&self) -> Slot { self.start_slot } } @@ -746,8 +748,8 @@ mod tests { ).unwrap().unwrap(); match genesis_epoch { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - assert_eq!(slot_number, 10101u64); + ViableEpochDescriptor::UnimportedGenesis(slot) => { + assert_eq!(slot, 10101u64); }, _ => panic!("should be unimported genesis"), }; @@ -760,8 +762,8 @@ mod tests { ).unwrap().unwrap(); match genesis_epoch_2 { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - assert_eq!(slot_number, 10102u64); + ViableEpochDescriptor::UnimportedGenesis(slot) => { + assert_eq!(slot, 10102u64); }, _ => panic!("should be unimported genesis"), }; diff --git a/client/consensus/epochs/src/migration.rs b/client/consensus/epochs/src/migration.rs index e4717b5584e0e..6e7baba8053af 100644 --- a/client/consensus/epochs/src/migration.rs +++ b/client/consensus/epochs/src/migration.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Migration types for epoch changes. 
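To make the renamed predicate above easier to follow: `epoch_descriptor_for_child_of` walks the fork tree for the deepest ancestor whose epoch could cover `slot` (`start_slot <= slot`), and a genesis node, which carries both epoch 0 and epoch 1, resolves to epoch 1 once that epoch's start slot has been reached. The sketch below reproduces just that selection logic with plain `u64` slots and local stand-in types:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct EpochHeader {
    start_slot: u64,
}

/// Stand-in for the persisted epoch-header node kinds.
#[derive(Debug, PartialEq)]
enum Node {
    /// Genesis nodes carry epoch 0 and epoch 1.
    Genesis(EpochHeader, EpochHeader),
    Regular(EpochHeader),
}

/// Predicate used while walking the tree: the node may cover `slot`.
fn covers(node: &Node, slot: u64) -> bool {
    match node {
        Node::Genesis(epoch_0, _) => epoch_0.start_slot <= slot,
        Node::Regular(epoch_n) => epoch_n.start_slot <= slot,
    }
}

/// Inside a genesis node, prefer epoch 1 once its start slot has been reached.
fn pick(node: &Node, slot: u64) -> EpochHeader {
    match node {
        Node::Genesis(epoch_0, epoch_1) => {
            if epoch_1.start_slot <= slot { *epoch_1 } else { *epoch_0 }
        }
        Node::Regular(epoch_n) => *epoch_n,
    }
}

fn main() {
    let genesis = Node::Genesis(EpochHeader { start_slot: 0 }, EpochHeader { start_slot: 200 });
    assert!(covers(&genesis, 50));
    assert_eq!(pick(&genesis, 50).start_slot, 0);
    assert_eq!(pick(&genesis, 250).start_slot, 200);

    let regular = Node::Regular(EpochHeader { start_slot: 300 });
    assert!(!covers(&regular, 250));
    println!("epoch lookup sketch ok");
}
```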
diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index dba8121264f40..679fd5a3eb388 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-manual-seal" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Manual sealing engine for Substrate" edition = "2018" @@ -14,36 +14,39 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -futures = "0.3.4" -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" +futures = "0.3.9" +jsonrpc-core = "15.1.0" +jsonrpc-core-client = "15.1.0" +jsonrpc-derive = "15.1.0" log = "0.4.8" -parking_lot = "0.10.0" +parking_lot = "0.11.1" +codec = { package = "parity-scale-codec", version = "2.0.0" } serde = { version = "1.0", features=["derive"] } assert_matches = "1.3.0" -sc-client-api = { path = "../../api", version = "2.0.0" } -sc-consensus-babe = { path = "../../consensus/babe", version = "0.8.0" } -sc-consensus-epochs = { path = "../../consensus/epochs", version = "0.8.0" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe", version = "0.8.0" } +sc-client-api = { path = "../../api", version = "3.0.0"} +sc-consensus-babe = { path = "../../consensus/babe", version = "0.9.0"} +sc-consensus-epochs = { path = "../../consensus/epochs", version = "0.9.0"} +sp-consensus-babe = { path = "../../../primitives/consensus/babe", version = "0.9.0"} -sc-transaction-pool = { path = "../../transaction-pool", version = "2.0.0" } -sp-blockchain = { path = "../../../primitives/blockchain", version = "2.0.0" } -sp-consensus = { package = "sp-consensus", path = "../../../primitives/consensus/common", version = "0.8.0" } -sp-inherents = { path = "../../../primitives/inherents", version = "2.0.0" } -sp-runtime = { path = "../../../primitives/runtime", version = "2.0.0" } -sp-core = { path = "../../../primitives/core", version = "2.0.0" } -sp-keystore = { path = "../../../primitives/keystore", version = "0.8.0" } -sp-api = { path = "../../../primitives/api", version = "2.0.0" } -sp-transaction-pool = { path = "../../../primitives/transaction-pool", version = "2.0.0" } -sp-timestamp = { path = "../../../primitives/timestamp", version = "2.0.0" } +sc-transaction-pool = { path = "../../transaction-pool", version = "3.0.0"} +sp-blockchain = { path = "../../../primitives/blockchain", version = "3.0.0"} +sp-consensus = { path = "../../../primitives/consensus/common", version = "0.9.0"} +sp-consensus-slots = { path = "../../../primitives/consensus/slots", version = "0.9.0"} +sp-inherents = { path = "../../../primitives/inherents", version = "3.0.0"} +sp-runtime = { path = "../../../primitives/runtime", version = "3.0.0"} +sp-core = { path = "../../../primitives/core", version = "3.0.0"} +sp-keystore = { path = "../../../primitives/keystore", version = "0.9.0"} +sp-keyring = { path = "../../../primitives/keyring", version = "3.0.0"} +sp-api = { path = "../../../primitives/api", version = "3.0.0"} +sp-transaction-pool = { path = "../../../primitives/transaction-pool", version = "3.0.0"} +sp-timestamp = { path = "../../../primitives/timestamp", version = "3.0.0"} -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} [dev-dependencies] tokio = { version = "0.2", 
features = ["rt-core", "macros"] } -sc-basic-authorship = { path = "../../basic-authorship", version = "0.8.0" } +sc-basic-authorship = { path = "../../basic-authorship", version = "0.9.0"} substrate-test-runtime-client = { path = "../../../test-utils/runtime/client", version = "2.0.0" } substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool", version = "2.0.0" } tempfile = "3.1.0" diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs index 7bafeb50207d4..0cfd99cab5c99 100644 --- a/client/consensus/manual-seal/src/consensus.rs +++ b/client/consensus/manual-seal/src/consensus.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index e51eb42e49e13..fb1ca629f693f 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,7 +20,7 @@ use super::ConsensusDataProvider; use crate::Error; - +use codec::Encode; use std::{ any::Any, borrow::Cow, @@ -30,21 +30,25 @@ use std::{ use sc_client_api::AuxStore; use sc_consensus_babe::{ Config, Epoch, authorship, CompatibleDigestItem, BabeIntermediate, - register_babe_inherent_data_provider, INTERMEDIATE_KEY, + register_babe_inherent_data_provider, INTERMEDIATE_KEY, find_pre_digest, }; -use sc_consensus_epochs::{SharedEpochChanges, descendent_query}; +use sc_consensus_epochs::{SharedEpochChanges, descendent_query, ViableEpochDescriptor, EpochHeader}; +use sp_keystore::SyncCryptoStorePtr; use sp_api::{ProvideRuntimeApi, TransactionFor}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus::BlockImportParams; -use sp_consensus_babe::{BabeApi, inherents::BabeInherentData}; -use sp_keystore::SyncCryptoStorePtr; +use sp_consensus_slots::Slot; +use sp_consensus_babe::{ + BabeApi, inherents::BabeInherentData, ConsensusLog, BABE_ENGINE_ID, AuthorityId, + digests::{PreDigest, SecondaryPlainPreDigest, NextEpochDescriptor}, BabeAuthorityWeight, +}; use sp_inherents::{InherentDataProviders, InherentData, ProvideInherentData, InherentIdentifier}; use sp_runtime::{ - traits::{DigestItemFor, DigestFor, Block as BlockT, Header as _}, - generic::Digest, + traits::{DigestItemFor, DigestFor, Block as BlockT, Zero, Header}, + generic::{Digest, BlockId}, }; -use sp_timestamp::{InherentType, InherentError, INHERENT_IDENTIFIER}; +use sp_timestamp::{InherentType, InherentError, INHERENT_IDENTIFIER, TimestampInherentData}; /// Provides BABE-compatible predigests and BlockImportParams. /// Intended for use with BABE runtimes. @@ -60,12 +64,15 @@ pub struct BabeConsensusDataProvider { /// BABE config, gotten from the runtime. config: Config, + + /// Authorities to be used for this babe chain. 
+ authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, } impl BabeConsensusDataProvider where B: BlockT, - C: AuxStore + ProvideRuntimeApi, + C: AuxStore + HeaderBackend + ProvideRuntimeApi + HeaderMetadata, C::Api: BabeApi, { pub fn new( @@ -73,9 +80,14 @@ impl BabeConsensusDataProvider keystore: SyncCryptoStorePtr, provider: &InherentDataProviders, epoch_changes: SharedEpochChanges, + authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, ) -> Result { + if authorities.is_empty() { + return Err(Error::StringError("Cannot supply empty authority set!".into())) + } + let config = Config::get_or_compute(&*client)?; - let timestamp_provider = SlotTimestampProvider::new(config.slot_duration)?; + let timestamp_provider = SlotTimestampProvider::new(client.clone())?; provider.register_provider(timestamp_provider)?; register_babe_inherent_data_provider(provider, config.slot_duration)?; @@ -85,28 +97,18 @@ impl BabeConsensusDataProvider client, keystore, epoch_changes, + authorities, }) } -} - -impl ConsensusDataProvider for BabeConsensusDataProvider - where - B: BlockT, - C: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, - C::Api: BabeApi, -{ - type Transaction = TransactionFor; - - fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result, Error> { - let slot_number = inherents.babe_inherent_data()?; + fn epoch(&self, parent: &B::Header, slot: Slot) -> Result { let epoch_changes = self.epoch_changes.lock(); let epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent.hash(), parent.number().clone(), - slot_number, + slot, ) .map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))? .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; @@ -121,15 +123,74 @@ impl ConsensusDataProvider for BabeConsensusDataProvider sp_consensus::Error::InvalidAuthoritiesSet })?; - // this is a dev node environment, we should always be able to claim a slot. - let (predigest, _) = authorship::claim_slot(slot_number, epoch.as_ref(), &self.keystore) - .ok_or_else(|| Error::StringError("failed to claim slot for authorship".into()))?; + Ok(epoch.as_ref().clone()) + } +} + +impl ConsensusDataProvider for BabeConsensusDataProvider + where + B: BlockT, + C: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + C::Api: BabeApi, +{ + type Transaction = TransactionFor; - Ok(Digest { - logs: vec![ + fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result, Error> { + let slot = inherents.babe_inherent_data()?; + let epoch = self.epoch(parent, slot)?; + + // this is a dev node environment, we should always be able to claim a slot. + let logs = if let Some((predigest, _)) = authorship::claim_slot( + slot, + &epoch, + &self.keystore, + ) { + vec![ as CompatibleDigestItem>::babe_pre_digest(predigest), - ], - }) + ] + } else { + // well we couldn't claim a slot because this is an existing chain and we're not in the authorities. + // we need to tell BabeBlockImport that the epoch has changed, and we put ourselves in the authorities. + let predigest = PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + slot, + authority_index: 0_u32, + }); + + let mut epoch_changes = self.epoch_changes.lock(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot, + ) + .map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))? 
+ .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; + + let epoch_mut = match epoch_descriptor { + ViableEpochDescriptor::Signaled(identifier, _epoch_header) => { + epoch_changes.epoch_mut(&identifier) + .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)? + }, + _ => unreachable!("we couldn't claim a slot, so this isn't the genesis epoch; qed") + }; + + // mutate the current epoch + epoch_mut.authorities = self.authorities.clone(); + + let next_epoch = ConsensusLog::NextEpochData(NextEpochDescriptor { + authorities: self.authorities.clone(), + // copy the old randomness + randomness: epoch_mut.randomness.clone(), + }); + + vec![ + DigestItemFor::::PreRuntime(BABE_ENGINE_ID, predigest.encode()), + DigestItemFor::::Consensus(BABE_ENGINE_ID, next_epoch.encode()) + ] + }; + + Ok(Digest { logs }) } fn append_block_import( @@ -138,17 +199,43 @@ impl ConsensusDataProvider for BabeConsensusDataProvider params: &mut BlockImportParams, inherents: &InherentData ) -> Result<(), Error> { - let slot_number = inherents.babe_inherent_data()?; - - let epoch_descriptor = self.epoch_changes.lock() + let slot = inherents.babe_inherent_data()?; + let epoch_changes = self.epoch_changes.lock(); + let mut epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent.hash(), parent.number().clone(), - slot_number, + slot, ) - .map_err(|e| Error::StringError(format!("failed to fetch epoch data: {}", e)))? + .map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))? .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; + // drop the lock + drop(epoch_changes); + // a quick check to see if we're in the authorities + let epoch = self.epoch(parent, slot)?; + let (authority, _) = self.authorities.first().expect("authorities is non-empty; qed"); + let has_authority = epoch.authorities.iter() + .find(|(id, _)| *id == *authority) + .is_some(); + + if !has_authority { + log::info!(target: "manual-seal", "authority not found"); + let slot = inherents.timestamp_inherent_data()? / self.config.slot_duration; + // manually hard code epoch descriptor + epoch_descriptor = match epoch_descriptor { + ViableEpochDescriptor::Signaled(identifier, _header) => { + ViableEpochDescriptor::Signaled( + identifier, + EpochHeader { + start_slot: slot.into(), + end_slot: (slot * self.config.epoch_length).into(), + }, + ) + }, + _ => unreachable!("we're not in the authorities, so this isn't the genesis epoch; qed") + }; + } params.intermediates.insert( Cow::from(INTERMEDIATE_KEY), @@ -168,12 +255,32 @@ struct SlotTimestampProvider { impl SlotTimestampProvider { /// create a new mocked time stamp provider. - fn new(slot_duration: u64) -> Result { - let now = SystemTime::now(); - let duration = now.duration_since(SystemTime::UNIX_EPOCH) - .map_err(|err| Error::StringError(format!("{}", err)))?; + fn new(client: Arc) -> Result + where + B: BlockT, + C: AuxStore + HeaderBackend + ProvideRuntimeApi, + C::Api: BabeApi, + { + let slot_duration = Config::get_or_compute(&*client)?.slot_duration; + let info = client.info(); + + // looks like this isn't the first block, rehydrate the fake time. + // otherwise we'd be producing blocks for older slots.
+ let duration = if info.best_number != Zero::zero() { + let header = client.header(BlockId::Hash(info.best_hash))?.unwrap(); + let slot = find_pre_digest::(&header).unwrap().slot(); + // add the slot duration so there's no collision of slots + (*slot * slot_duration) + slot_duration + } else { + // this is the first block, use the correct time. + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .map_err(|err| Error::StringError(format!("{}", err)))? + .as_millis() as u64 + }; + Ok(Self { - time: atomic::AtomicU64::new(duration.as_millis() as u64), + time: atomic::AtomicU64::new(duration), slot_duration, }) } diff --git a/client/consensus/manual-seal/src/error.rs b/client/consensus/manual-seal/src/error.rs index e2628008c24c7..77140c835a3ee 100644 --- a/client/consensus/manual-seal/src/error.rs +++ b/client/consensus/manual-seal/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/consensus/manual-seal/src/finalize_block.rs b/client/consensus/manual-seal/src/finalize_block.rs index 5780a25f97256..76ae6eeeae5ac 100644 --- a/client/consensus/manual-seal/src/finalize_block.rs +++ b/client/consensus/manual-seal/src/finalize_block.rs @@ -1,18 +1,20 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Block finalization utilities diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index d025d6aaf689f..3ec68588573eb 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -84,7 +84,6 @@ pub fn import_queue( ManualSealVerifier, block_import, None, - None, spawner, registry, ) @@ -164,10 +163,10 @@ pub async fn run_manual_seal( C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Error: std::fmt::Display, - >::Error: std::fmt::Display, + E::Proposer: Proposer>, CS: Stream::Hash>> + Unpin + 'static, SC: SelectChain + 'static, + TransactionFor: 'static, { while let Some(command) = commands_stream.next().await { match command { @@ -231,9 +230,9 @@ pub async fn run_instant_seal( C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Error: std::fmt::Display, - >::Error: std::fmt::Display, - SC: SelectChain + 'static + E::Proposer: Proposer>, + SC: SelectChain + 'static, + TransactionFor: 'static, { // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. @@ -294,7 +293,7 @@ mod tests { let inherent_data_providers = InherentDataProviders::new(); let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( - Options::default(), api(), None, RevalidationType::Full, spawner.clone(), + Options::default(), true.into(), api(), None, RevalidationType::Full, spawner.clone(), )); let env = ProposerFactory::new( spawner.clone(), @@ -349,7 +348,6 @@ mod tests { clear_justification_requests: false, needs_justification: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true, } } @@ -366,7 +364,7 @@ mod tests { let inherent_data_providers = InherentDataProviders::new(); let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( - Options::default(), api(), None, RevalidationType::Full, spawner.clone(), + Options::default(), true.into(), api(), None, RevalidationType::Full, spawner.clone(), )); let env = ProposerFactory::new( spawner.clone(), @@ -416,7 +414,6 @@ mod tests { clear_justification_requests: false, needs_justification: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true, } } @@ -442,7 +439,7 @@ mod tests { let pool_api = api(); let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( - Options::default(), pool_api.clone(), None, RevalidationType::Full, spawner.clone(), + Options::default(), true.into(), pool_api.clone(), None, RevalidationType::Full, spawner.clone(), )); let env = ProposerFactory::new( spawner.clone(), @@ -494,7 +491,6 @@ mod tests { clear_justification_requests: false, needs_justification: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true } } diff --git a/client/consensus/manual-seal/src/rpc.rs b/client/consensus/manual-seal/src/rpc.rs index 690b6c1eb9996..293d4487a5d59 100644 --- a/client/consensus/manual-seal/src/rpc.rs +++ b/client/consensus/manual-seal/src/rpc.rs @@ -1,18 +1,20 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! RPC interface for the `ManualSeal` Engine. diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 58f017f2d41ad..59b99349bf9b2 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -1,18 +1,20 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! 
Block sealing utilities @@ -87,10 +89,10 @@ pub async fn seal_block( + Send + Sync + 'static, C: HeaderBackend + ProvideRuntimeApi, E: Environment, - >::Error: std::fmt::Display, - >::Error: std::fmt::Display, + E::Proposer: Proposer>, P: txpool::ChainApi, SC: SelectChain, + TransactionFor: 'static, { let future = async { if pool.validated_pool().status().ready == 0 && !create_empty { @@ -111,7 +113,7 @@ pub async fn seal_block( }; let proposer = env.init(&parent) - .map_err(|err| Error::StringError(format!("{}", err))).await?; + .map_err(|err| Error::StringError(format!("{:?}", err))).await?; let id = inherent_data_provider.create_inherent_data()?; let inherents_len = id.len(); @@ -122,7 +124,7 @@ pub async fn seal_block( }; let proposal = proposer.propose(id.clone(), digest, Duration::from_secs(MAX_PROPOSAL_DURATION), false.into()) - .map_err(|err| Error::StringError(format!("{}", err))).await?; + .map_err(|err| Error::StringError(format!("{:?}", err))).await?; if proposal.block.extrinsics().len() == inherents_len && !create_empty { return Err(Error::EmptyTransactionPool) @@ -133,6 +135,7 @@ pub async fn seal_block( params.body = Some(body); params.finalized = finalize; params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + params.storage_changes = Some(proposal.storage_changes); if let Some(digest_provider) = digest_provider { digest_provider.append_block_import(&parent, &mut params, &id)?; diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index fbb02ccc71121..8be43a8fa04bc 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-pow" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "PoW consensus algorithm for substrate" edition = "2018" @@ -13,20 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-consensus-pow = { version = "0.8.0", path = "../../../primitives/consensus/pow" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sc-client-api = { version = "3.0.0", path = "../../api" } +sp-block-builder = { version = "3.0.0", path = "../../../primitives/block-builder" } +sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } +sp-consensus-pow = { version = "0.9.0", path = "../../../primitives/consensus/pow" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } log = "0.4.8" futures = { version = "0.3.1", features = ["compat"] } futures-timer = "3.0.1" -parking_lot 
= "0.10.0" -sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } +parking_lot = "0.11.1" +sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } derive_more = "0.99.2" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index b73b9aa91f802..975a6f17e795f 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -56,7 +56,7 @@ use sp_consensus::{ BlockCheckParams, ImportResult, }; use sp_consensus::import_queue::{ - BoxBlockImport, BasicQueue, Verifier, BoxJustificationImport, BoxFinalityProofImport, + BoxBlockImport, BasicQueue, Verifier, BoxJustificationImport, }; use codec::{Encode, Decode}; use prometheus_endpoint::Registry; @@ -503,7 +503,6 @@ pub type PowImportQueue = BasicQueue; pub fn import_queue( block_import: BoxBlockImport, justification_import: Option>, - finality_proof_import: Option>, algorithm: Algorithm, inherent_data_providers: InherentDataProviders, spawner: &impl sp_core::traits::SpawnNamed, @@ -524,7 +523,6 @@ pub fn import_queue( verifier, block_import, justification_import, - finality_proof_import, spawner, registry, )) diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index 4ed863dcd9ed9..c19c5524d9774 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index a13a712fe76b7..7ca413630e26e 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-slots" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Generic slots-based utilities for consensus" edition = "2018" @@ -14,23 +14,25 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-consensus-slots = { version = "0.8.0", path = "../../../primitives/consensus/slots" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sc-telemetry = { version = "2.0.0", path = "../../telemetry" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -futures = "0.3.4" +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-client-api = { version = "3.0.0", path = "../../api" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-trie = { version = "3.0.0", path = "../../../primitives/trie" } +sp-application-crypto = { version = "3.0.0", path = "../../../primitives/application-crypto" } +sp-arithmetic = { version = "3.0.0", path = "../../../primitives/arithmetic" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-consensus-slots = { version = "0.9.0", path = "../../../primitives/consensus/slots" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.9.0", path = "../../../primitives/state-machine" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sc-telemetry = { version = "3.0.0", path = "../../telemetry" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } +futures = "0.3.9" futures-timer = "3.0.1" -parking_lot = "0.10.0" -log = "0.4.8" +parking_lot = "0.11.1" +log = "0.4.11" +thiserror = "1.0.21" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/build.rs b/client/consensus/slots/build.rs index 513cc234d4363..57424f016f3e5 100644 --- a/client/consensus/slots/build.rs +++ b/client/consensus/slots/build.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use std::env; diff --git a/client/consensus/slots/src/aux_schema.rs b/client/consensus/slots/src/aux_schema.rs index 1f1fe37068f82..db94ec48855e4 100644 --- a/client/consensus/slots/src/aux_schema.rs +++ b/client/consensus/slots/src/aux_schema.rs @@ -1,25 +1,27 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Schema for slots in the aux-db. use codec::{Encode, Decode}; use sc_client_api::backend::AuxStore; use sp_blockchain::{Result as ClientResult, Error as ClientError}; -use sp_consensus_slots::EquivocationProof; +use sp_consensus_slots::{EquivocationProof, Slot}; use sp_runtime::traits::Header; const SLOT_HEADER_MAP_KEY: &[u8] = b"slot_header_map"; @@ -39,7 +41,7 @@ fn load_decode(backend: &C, key: &[u8]) -> ClientResult> None => Ok(None), Some(t) => T::decode(&mut &t[..]) .map_err( - |e| ClientError::Backend(format!("Slots DB is corrupted. Decode error: {}", e.what())), + |e| ClientError::Backend(format!("Slots DB is corrupted. Decode error: {}", e)), ) .map(Some) } @@ -50,8 +52,8 @@ fn load_decode(backend: &C, key: &[u8]) -> ClientResult> /// Note: it detects equivocations only when slot_now - slot <= MAX_SLOT_CAPACITY. pub fn check_equivocation( backend: &C, - slot_now: u64, - slot: u64, + slot_now: Slot, + slot: Slot, header: &H, signer: &P, ) -> ClientResult>> @@ -61,7 +63,7 @@ pub fn check_equivocation( P: Clone + Encode + Decode + PartialEq, { // We don't check equivocations for old headers out of our capacity. - if slot_now.saturating_sub(slot) > MAX_SLOT_CAPACITY { + if slot_now.saturating_sub(*slot) > Slot::from(MAX_SLOT_CAPACITY) { return Ok(None); } @@ -75,7 +77,7 @@ pub fn check_equivocation( // Get first slot saved. 
let slot_header_start = SLOT_HEADER_START.to_vec(); - let first_saved_slot = load_decode::<_, u64>(backend, &slot_header_start[..])? + let first_saved_slot = load_decode::<_, Slot>(backend, &slot_header_start[..])? .unwrap_or(slot); if slot_now < first_saved_slot { @@ -90,7 +92,7 @@ pub fn check_equivocation( // 2) with different hash if header.hash() != prev_header.hash() { return Ok(Some(EquivocationProof { - slot_number: slot, + slot, offender: signer.clone(), first_header: prev_header.clone(), second_header: header.clone(), @@ -107,11 +109,11 @@ pub fn check_equivocation( let mut keys_to_delete = vec![]; let mut new_first_saved_slot = first_saved_slot; - if slot_now - first_saved_slot >= PRUNING_BOUND { + if *slot_now - *first_saved_slot >= PRUNING_BOUND { let prefix = SLOT_HEADER_MAP_KEY.to_vec(); new_first_saved_slot = slot_now.saturating_sub(MAX_SLOT_CAPACITY); - for s in first_saved_slot..new_first_saved_slot { + for s in u64::from(first_saved_slot)..new_first_saved_slot.into() { let mut p = prefix.clone(); s.using_encoded(|s| p.extend(s)); keys_to_delete.push(p); @@ -172,8 +174,8 @@ mod test { assert!( check_equivocation( &client, - 2, - 2, + 2.into(), + 2.into(), &header1, &public, ).unwrap().is_none(), @@ -182,8 +184,8 @@ mod test { assert!( check_equivocation( &client, - 3, - 2, + 3.into(), + 2.into(), &header1, &public, ).unwrap().is_none(), @@ -193,8 +195,8 @@ mod test { assert!( check_equivocation( &client, - 4, - 2, + 4.into(), + 2.into(), &header2, &public, ).unwrap().is_some(), @@ -204,8 +206,8 @@ mod test { assert!( check_equivocation( &client, - 5, - 4, + 5.into(), + 4.into(), &header3, &public, ).unwrap().is_none(), @@ -215,8 +217,8 @@ mod test { assert!( check_equivocation( &client, - PRUNING_BOUND + 2, - MAX_SLOT_CAPACITY + 4, + (PRUNING_BOUND + 2).into(), + (MAX_SLOT_CAPACITY + 4).into(), &header4, &public, ).unwrap().is_none(), @@ -226,8 +228,8 @@ mod test { assert!( check_equivocation( &client, - PRUNING_BOUND + 3, - MAX_SLOT_CAPACITY + 4, + (PRUNING_BOUND + 3).into(), + (MAX_SLOT_CAPACITY + 4).into(), &header5, &public, ).unwrap().is_some(), @@ -237,8 +239,8 @@ mod test { assert!( check_equivocation( &client, - PRUNING_BOUND + 4, - 4, + (PRUNING_BOUND + 4).into(), + 4.into(), &header6, &public, ).unwrap().is_none(), diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 681d4a6273ed9..d851753921334 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
+// along with this program. If not, see . //! Slots functionality for Substrate. //! @@ -20,7 +22,8 @@ //! time during which certain events can and/or must occur. This crate //! provides generic functionality for slots. -#![forbid(unsafe_code, missing_docs)] +#![forbid(unsafe_code)] +#![deny(missing_docs)] mod slots; mod aux_schema; @@ -29,18 +32,22 @@ pub use slots::SlotInfo; use slots::Slots; pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; +use std::{fmt::Debug, ops::Deref, pin::Pin, sync::Arc, time::{Instant, Duration}}; use codec::{Decode, Encode}; -use sp_consensus::{BlockImport, Proposer, SyncOracle, SelectChain, CanAuthorWith, SlotData, RecordProof}; use futures::{prelude::*, future::{self, Either}}; use futures_timer::Delay; -use sp_inherents::{InherentData, InherentDataProviders}; use log::{debug, error, info, warn}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; +use parking_lot::Mutex; use sp_api::{ProvideRuntimeApi, ApiRef}; -use std::{fmt::Debug, ops::Deref, pin::Pin, sync::Arc, time::{Instant, Duration}}; +use sp_arithmetic::traits::BaseArithmetic; +use sp_consensus::{BlockImport, Proposer, SyncOracle, SelectChain, CanAuthorWith, SlotData, RecordProof}; +use sp_consensus_slots::Slot; +use sp_inherents::{InherentData, InherentDataProviders}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header, HashFor, NumberFor} +}; use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_WARN, CONSENSUS_INFO}; -use parking_lot::Mutex; /// The changes that need to applied to the storage to create the state for a block. /// @@ -109,7 +116,7 @@ pub trait SimpleSlotWorker { fn epoch_data( &self, header: &B::Header, - slot_number: u64, + slot: Slot, ) -> Result; /// Returns the number of authorities given the epoch data. @@ -120,7 +127,7 @@ pub trait SimpleSlotWorker { fn claim_slot( &self, header: &B::Header, - slot_number: u64, + slot: Slot, epoch_data: &Self::EpochData, ) -> Option; @@ -129,14 +136,14 @@ pub trait SimpleSlotWorker { fn notify_slot( &self, _header: &B::Header, - _slot_number: u64, + _slot: Slot, _epoch_data: &Self::EpochData, ) {} /// Return the pre digest data to include in a block authored with the given claim. fn pre_digest_data( &self, - slot_number: u64, + slot: Slot, claim: &Self::Claim, ) -> Vec>; @@ -158,6 +165,16 @@ pub trait SimpleSlotWorker { /// Whether to force authoring if offline. fn force_authoring(&self) -> bool; + /// Returns whether the block production should back off. + /// + /// By default this function always returns `false`. + /// + /// An example strategy that back offs if the finalized head is lagging too much behind the tip + /// is implemented by [`BackoffAuthoringOnFinalizedHeadLagging`]. + fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { + false + } + /// Returns a handle to a `SyncOracle`. 
fn sync_oracle(&mut self) -> &mut Self::SyncOracle; @@ -192,7 +209,7 @@ pub trait SimpleSlotWorker { where >::Proposal: Unpin + Send + 'static, { - let (timestamp, slot_number) = (slot_info.timestamp, slot_info.number); + let (timestamp, slot) = (slot_info.timestamp, slot_info.slot); let slot_remaining_duration = self.slot_remaining_duration(&slot_info); let proposing_remaining_duration = self.proposing_remaining_duration(&chain_head, &slot_info); @@ -202,7 +219,7 @@ pub trait SimpleSlotWorker { debug!( target: self.logging_target(), "Skipping proposal slot {} since there's no time left to propose", - slot_number, + slot, ); return Box::pin(future::ready(None)); @@ -211,7 +228,7 @@ pub trait SimpleSlotWorker { None => Box::new(future::pending()) as Box<_>, }; - let epoch_data = match self.epoch_data(&chain_head, slot_number) { + let epoch_data = match self.epoch_data(&chain_head, slot) { Ok(epoch_data) => epoch_data, Err(err) => { warn!("Unable to fetch epoch data at block {:?}: {:?}", chain_head.hash(), err); @@ -226,7 +243,7 @@ pub trait SimpleSlotWorker { } }; - self.notify_slot(&chain_head, slot_number, &epoch_data); + self.notify_slot(&chain_head, slot, &epoch_data); let authorities_len = self.authorities_len(&epoch_data); @@ -244,34 +261,43 @@ pub trait SimpleSlotWorker { return Box::pin(future::ready(None)); } - let claim = match self.claim_slot(&chain_head, slot_number, &epoch_data) { + let claim = match self.claim_slot(&chain_head, slot, &epoch_data) { None => return Box::pin(future::ready(None)), Some(claim) => claim, }; + if self.should_backoff(slot, &chain_head) { + return Box::pin(future::ready(None)); + } + debug!( target: self.logging_target(), "Starting authorship at slot {}; timestamp = {}", - slot_number, + slot, timestamp, ); - telemetry!(CONSENSUS_DEBUG; "slots.starting_authorship"; - "slot_num" => slot_number, + telemetry!( + CONSENSUS_DEBUG; + "slots.starting_authorship"; + "slot_num" => *slot, "timestamp" => timestamp, ); let awaiting_proposer = self.proposer(&chain_head).map_err(move |err| { - warn!("Unable to author block in slot {:?}: {:?}", slot_number, err); + warn!("Unable to author block in slot {:?}: {:?}", slot, err); - telemetry!(CONSENSUS_WARN; "slots.unable_authoring_block"; - "slot" => slot_number, "err" => ?err + telemetry!( + CONSENSUS_WARN; + "slots.unable_authoring_block"; + "slot" => *slot, + "err" => ?err ); err }); - let logs = self.pre_digest_data(slot_number, &claim); + let logs = self.pre_digest_data(slot, &claim); // deadline our production to approx. the end of the slot let proposing = awaiting_proposer.and_then(move |proposer| proposer.propose( @@ -287,12 +313,14 @@ pub trait SimpleSlotWorker { futures::future::select(proposing, proposing_remaining).map(move |v| match v { Either::Left((b, _)) => b.map(|b| (b, claim)), Either::Right(_) => { - info!("⌛️ Discarding proposal for slot {}; block production took too long", slot_number); + info!("⌛️ Discarding proposal for slot {}; block production took too long", slot); // If the node was compiled with debug, tell the user to use release optimizations. 
#[cfg(build_type="debug")] info!("👉 Recompile your node in `--release` mode to mitigate this problem."); - telemetry!(CONSENSUS_INFO; "slots.discarding_proposal_took_too_long"; - "slot" => slot_number, + telemetry!( + CONSENSUS_INFO; + "slots.discarding_proposal_took_too_long"; + "slot" => *slot, ); Err(sp_consensus::Error::ClientImport("Timeout in the Slots proposer".into())) @@ -368,7 +396,7 @@ pub trait SlotCompatible { fn extract_timestamp_and_slot( &self, inherent: &InherentData, - ) -> Result<(u64, u64, std::time::Duration), sp_consensus::Error>; + ) -> Result<(u64, Slot, std::time::Duration), sp_consensus::Error>; } /// Start a new slot worker. @@ -409,12 +437,12 @@ where return Either::Right(future::ready(Ok(()))); } - let slot_num = slot_info.number; + let slot = slot_info.slot; let chain_head = match client.best_chain() { Ok(x) => x, Err(e) => { warn!(target: "slots", "Unable to author block in slot {}. \ - no best block header: {:?}", slot_num, e); + no best block header: {:?}", slot, e); return Either::Right(future::ready(Ok(()))); } }; @@ -424,7 +452,7 @@ where target: "slots", "Unable to author block in slot {},. `can_author_with` returned: {} \ Probably a node update is required!", - slot_num, + slot, err, ); Either::Right(future::ready(Ok(()))) @@ -445,7 +473,7 @@ where pub enum CheckedHeader { /// A header which has slot in the future. this is the full header (not stripped) /// and the slot in which it should be processed. - Deferred(H, u64), + Deferred(H, Slot), /// A header which is fully checked, including signature. This is the pre-header /// accompanied by the seal components. /// @@ -453,6 +481,13 @@ pub enum CheckedHeader { Checked(H, S), } +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum Error where T: Debug { + #[error("Slot duration is invalid: {0:?}")] + SlotDurationInvalid(SlotDuration), +} + /// A slot duration. Create with `get_or_compute`. // The internal member should stay private here to maintain invariants of // `get_or_compute`. @@ -466,7 +501,7 @@ impl Deref for SlotDuration { } } -impl SlotData for SlotDuration { +impl SlotData for SlotDuration { /// Get the slot duration in milliseconds. fn slot_duration(&self) -> u64 where T: SlotData, @@ -477,7 +512,7 @@ impl SlotData for SlotDuration { const SLOT_KEY: &'static [u8] = T::SLOT_KEY; } -impl SlotDuration { +impl SlotDuration { /// Either fetch the slot duration from disk or compute it from the /// genesis state. /// @@ -515,10 +550,8 @@ impl SlotDuration { } }?; - if slot_duration.slot_duration() == 0 { - return Err(sp_blockchain::Error::Msg( - "Invalid value for slot_duration: the value must be greater than 0.".into(), - )) + if slot_duration.slot_duration() == 0u64 { + return Err(sp_blockchain::Error::Application(Box::new(Error::SlotDurationInvalid(slot_duration)))) } Ok(slot_duration) @@ -534,7 +567,7 @@ impl SlotDuration { /// to parent. If the number of skipped slots is greated than 0 this method will apply /// an exponential backoff of at most `2^7 * slot_duration`, if no slots were skipped /// this method will return `None.` -pub fn slot_lenience_exponential(parent_slot: u64, slot_info: &SlotInfo) -> Option { +pub fn slot_lenience_exponential(parent_slot: Slot, slot_info: &SlotInfo) -> Option { // never give more than 2^this times the lenience. const BACKOFF_CAP: u64 = 7; @@ -547,7 +580,7 @@ pub fn slot_lenience_exponential(parent_slot: u64, slot_info: &SlotInfo) -> Opti // exponential back-off. // in normal cases we only attempt to issue blocks up to the end of the slot. 
// when the chain has been stalled for a few slots, we give more lenience. - let skipped_slots = slot_info.number.saturating_sub(parent_slot + 1); + let skipped_slots = *slot_info.slot.saturating_sub(parent_slot + 1); if skipped_slots == 0 { None @@ -563,7 +596,7 @@ pub fn slot_lenience_exponential(parent_slot: u64, slot_info: &SlotInfo) -> Opti /// to parent. If the number of skipped slots is greated than 0 this method will apply /// a linear backoff of at most `20 * slot_duration`, if no slots were skipped /// this method will return `None.` -pub fn slot_lenience_linear(parent_slot: u64, slot_info: &SlotInfo) -> Option { +pub fn slot_lenience_linear(parent_slot: Slot, slot_info: &SlotInfo) -> Option { // never give more than 20 times more lenience. const BACKOFF_CAP: u64 = 20; @@ -573,7 +606,7 @@ pub fn slot_lenience_linear(parent_slot: u64, slot_info: &SlotInfo) -> Option Option { + /// Returns true if we should backoff authoring new blocks. + fn should_backoff( + &self, + chain_head_number: N, + chain_head_slot: Slot, + finalized_number: N, + slot_now: Slot, + logging_target: &str, + ) -> bool; +} + +/// A simple default strategy for deciding when to back off authoring blocks if the number of +/// unfinalized blocks grows too large. +#[derive(Clone)] +pub struct BackoffAuthoringOnFinalizedHeadLagging { + /// The max interval to backoff when authoring blocks, regardless of delay in finality. + pub max_interval: N, + /// The number of unfinalized blocks allowed before starting to consider backing off authoring + /// blocks. Note that depending on the value for `authoring_bias`, there might still be an + /// additional wait until block authorship starts getting declined. + pub unfinalized_slack: N, + /// Scales the backoff rate. A higher value effectively means we back off more slowly, taking + /// longer to reach the maximum backoff as the unfinalized head of the chain grows. + pub authoring_bias: N, +} + +/// These parameters are supposed to be some form of sensible defaults. +impl Default for BackoffAuthoringOnFinalizedHeadLagging { + fn default() -> Self { + Self { + // Never wait more than 100 slots before authoring blocks, regardless of delay in + // finality. + max_interval: 100.into(), + // Start to consider backing off block authorship once we have 50 or more unfinalized + // blocks at the head of the chain. + unfinalized_slack: 50.into(), + // A reasonable default for the authoring bias, or reciprocal interval scaling, is 2. + // Effectively this means we consider the unfinalized head suffix length to grow half as + // fast as it actually does. + authoring_bias: 2.into(), + } + } +} + +impl BackoffAuthoringBlocksStrategy for BackoffAuthoringOnFinalizedHeadLagging +where + N: BaseArithmetic + Copy +{ + fn should_backoff( + &self, + chain_head_number: N, + chain_head_slot: Slot, + finalized_number: N, + slot_now: Slot, + logging_target: &str, + ) -> bool { + // This should not happen, but we want to keep the previous behaviour if it does. + if slot_now <= chain_head_slot { + return false; + } + + let unfinalized_block_length = chain_head_number - finalized_number; + let interval = unfinalized_block_length.saturating_sub(self.unfinalized_slack) + / self.authoring_bias; + let interval = interval.min(self.max_interval); + + // We're doing arithmetic between block and slot numbers. + let interval: u64 = interval.unique_saturated_into(); + + // If interval is nonzero we backoff if the current slot isn't far enough ahead of the chain + // head.
+ if *slot_now <= *chain_head_slot + interval { + info!( + target: logging_target, + "Backing off claiming new slot for block authorship: finality is lagging.", + ); + true + } else { + false + } + } +} + +impl BackoffAuthoringBlocksStrategy for () { + fn should_backoff( + &self, + _chain_head_number: N, + _chain_head_slot: Slot, + _finalized_number: N, + _slot_now: Slot, + _logging_target: &str, + ) -> bool { + false + } +} + #[cfg(test)] mod test { use std::time::{Duration, Instant}; + use crate::{BackoffAuthoringOnFinalizedHeadLagging, BackoffAuthoringBlocksStrategy}; + use substrate_test_runtime_client::runtime::Block; + use sp_api::NumberFor; const SLOT_DURATION: Duration = Duration::from_millis(6000); - fn slot(n: u64) -> super::slots::SlotInfo { + fn slot(slot: u64) -> super::slots::SlotInfo { super::slots::SlotInfo { - number: n, + slot: slot.into(), duration: SLOT_DURATION.as_millis() as u64, timestamp: Default::default(), inherent_data: Default::default(), @@ -602,20 +736,20 @@ mod test { #[test] fn linear_slot_lenience() { // if no slots are skipped there should be no lenience - assert_eq!(super::slot_lenience_linear(1, &slot(2)), None); + assert_eq!(super::slot_lenience_linear(1.into(), &slot(2)), None); // otherwise the lenience is incremented linearly with // the number of skipped slots. for n in 3..=22 { assert_eq!( - super::slot_lenience_linear(1, &slot(n)), + super::slot_lenience_linear(1.into(), &slot(n)), Some(SLOT_DURATION * (n - 2) as u32), ); } // but we cap it to a maximum of 20 slots assert_eq!( - super::slot_lenience_linear(1, &slot(23)), + super::slot_lenience_linear(1.into(), &slot(23)), Some(SLOT_DURATION * 20), ); } @@ -623,25 +757,364 @@ mod test { #[test] fn exponential_slot_lenience() { // if no slots are skipped there should be no lenience - assert_eq!(super::slot_lenience_exponential(1, &slot(2)), None); + assert_eq!(super::slot_lenience_exponential(1.into(), &slot(2)), None); // otherwise the lenience is incremented exponentially every two slots for n in 3..=17 { assert_eq!( - super::slot_lenience_exponential(1, &slot(n)), + super::slot_lenience_exponential(1.into(), &slot(n)), Some(SLOT_DURATION * 2u32.pow((n / 2 - 1) as u32)), ); } // but we cap it to a maximum of 14 slots assert_eq!( - super::slot_lenience_exponential(1, &slot(18)), + super::slot_lenience_exponential(1.into(), &slot(18)), Some(SLOT_DURATION * 2u32.pow(7)), ); assert_eq!( - super::slot_lenience_exponential(1, &slot(19)), + super::slot_lenience_exponential(1.into(), &slot(19)), Some(SLOT_DURATION * 2u32.pow(7)), ); } + + #[derive(PartialEq, Debug)] + struct HeadState { + head_number: NumberFor, + head_slot: u64, + slot_now: NumberFor, + } + + impl HeadState { + fn author_block(&mut self) { + // Add a block to the head, and set latest slot to the current + self.head_number += 1; + self.head_slot = self.slot_now; + // Advance slot to next + self.slot_now += 1; + } + + fn dont_author_block(&mut self) { + self.slot_now += 1; + } + } + + #[test] + fn should_never_backoff_when_head_not_advancing() { + let strategy = BackoffAuthoringOnFinalizedHeadLagging::> { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let head_number = 1; + let head_slot = 1; + let finalized_number = 1; + let slot_now = 2; + + let should_backoff: Vec = (slot_now..1000) + .map(|s| strategy.should_backoff(head_number, head_slot.into(), finalized_number, s.into(), "slots")) + .collect(); + + // Should always be false, since the head isn't advancing + let expected: Vec = (slot_now..1000).map(|_| 
false).collect(); + assert_eq!(should_backoff, expected); + } + + #[test] + fn should_stop_authoring_if_blocks_are_still_produced_when_finality_stalled() { + let strategy = BackoffAuthoringOnFinalizedHeadLagging::> { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let mut head_number = 1; + let mut head_slot = 1; + let finalized_number = 1; + let slot_now = 2; + + let should_backoff: Vec = (slot_now..300) + .map(move |s| { + let b = strategy.should_backoff( + head_number, + head_slot.into(), + finalized_number, + s.into(), + "slots", + ); + // Chain is still advancing (by someone else) + head_number += 1; + head_slot = s; + b + }) + .collect(); + + // Should always be true after a short while, since the chain is advancing but finality is stalled + let expected: Vec = (slot_now..300).map(|s| s > 8).collect(); + assert_eq!(should_backoff, expected); + } + + #[test] + fn should_never_backoff_if_max_interval_is_reached() { + let strategy = BackoffAuthoringOnFinalizedHeadLagging::> { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + // The limit `max_interval` is used when the unfinalized chain grows to + // `max_interval * authoring_bias + unfinalized_slack`, + // which for the above parameters becomes + // 100 * 2 + 5 = 205. + // Hence we trigger this with head_number > finalized_number + 205. + let head_number = 207; + let finalized_number = 1; + + // The limit is then used once the current slot is `max_interval` ahead of slot of the head. + let head_slot = 1; + let slot_now = 2; + let max_interval = strategy.max_interval; + + let should_backoff: Vec = (slot_now..200) + .map(|s| strategy.should_backoff(head_number, head_slot.into(), finalized_number, s.into(), "slots")) + .collect(); + + // Should backoff (true) until we are `max_interval` number of slots ahead of the chain + // head slot, then we never backoff (false). 
+ let expected: Vec = (slot_now..200).map(|s| s <= max_interval + head_slot).collect(); + assert_eq!(should_backoff, expected); + } + + #[test] + fn should_backoff_authoring_when_finality_stalled() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let finalized_number = 2; + let mut head_state = HeadState { + head_number: 4, + head_slot: 10, + slot_now: 11, + }; + + let should_backoff = |head_state: &HeadState| -> bool { + >>::should_backoff( + ¶m, + head_state.head_number, + head_state.head_slot.into(), + finalized_number, + head_state.slot_now.into(), + "slots", + ) + }; + + let backoff: Vec = (head_state.slot_now..200) + .map(|_| { + if should_backoff(&head_state) { + head_state.dont_author_block(); + true + } else { + head_state.author_block(); + false + } + }) + .collect(); + + // Gradually start to backoff more and more frequently + let expected = [ + false, false, false, false, false, // no effect + true, false, + true, false, // 1:1 + true, true, false, + true, true, false, // 2:1 + true, true, true, false, + true, true, true, false, // 3:1 + true, true, true, true, false, + true, true, true, true, false, // 4:1 + true, true, true, true, true, false, + true, true, true, true, true, false, // 5:1 + true, true, true, true, true, true, false, + true, true, true, true, true, true, false, // 6:1 + true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, false, // 7:1 + true, true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, true, false, // 8:1 + true, true, true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, true, true, false, // 9:1 + true, true, true, true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, true, true, true, false, // 10:1 + true, true, true, true, true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, true, true, true, true, false, // 11:1 + true, true, true, true, true, true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, true, true, true, true, true, false, // 12:1 + true, true, true, true, + ]; + + assert_eq!(backoff.as_slice(), &expected[..]); + } + + #[test] + fn should_never_wait_more_than_max_interval() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let finalized_number = 2; + let starting_slot = 11; + let mut head_state = HeadState { + head_number: 4, + head_slot: 10, + slot_now: starting_slot, + }; + + let should_backoff = |head_state: &HeadState| -> bool { + >>::should_backoff( + ¶m, + head_state.head_number, + head_state.head_slot.into(), + finalized_number, + head_state.slot_now.into(), + "slots", + ) + }; + + let backoff: Vec = (head_state.slot_now..40000) + .map(|_| { + if should_backoff(&head_state) { + head_state.dont_author_block(); + true + } else { + head_state.author_block(); + false + } + }) + .collect(); + + let slots_claimed: Vec = backoff + .iter() + .enumerate() + .filter(|&(_i, x)| x == &false) + .map(|(i, _x)| i + starting_slot as usize) + .collect(); + + let last_slot = backoff.len() + starting_slot as usize; + let mut last_two_claimed = slots_claimed.iter().rev().take(2); + + // Check that we claimed all the way to the end. Check two slots for when we have an uneven + // number of slots_claimed. 
+ let expected_distance = param.max_interval as usize + 1; + assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92); + assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92 + expected_distance); + + let intervals: Vec<_> = slots_claimed + .windows(2) + .map(|x| x[1] - x[0]) + .collect(); + + // The key thing is that the distance between claimed slots is capped to `max_interval + 1` + // assert_eq!(max_observed_interval, Some(&expected_distance)); + assert_eq!(intervals.iter().max(), Some(&expected_distance)); + + // But lets assert all distances, which we expect to grow linearly until `max_interval + 1` + let expected_intervals: Vec<_> = (0..497) + .map(|i| (i/2).max(1).min(expected_distance) ) + .collect(); + + assert_eq!(intervals, expected_intervals); + } + + fn run_until_max_interval(param: BackoffAuthoringOnFinalizedHeadLagging) -> (u64, u64) { + let finalized_number = 0; + let mut head_state = HeadState { + head_number: 0, + head_slot: 0, + slot_now: 1, + }; + + let should_backoff = |head_state: &HeadState| -> bool { + >>::should_backoff( + ¶m, + head_state.head_number, + head_state.head_slot.into(), + finalized_number, + head_state.slot_now.into(), + "slots", + ) + }; + + // Number of blocks until we reach the max interval + let block_for_max_interval + = param.max_interval * param.authoring_bias + param.unfinalized_slack; + + while head_state.head_number < block_for_max_interval { + if should_backoff(&head_state) { + head_state.dont_author_block(); + } else { + head_state.author_block(); + } + } + + let slot_time = 6; + let time_to_reach_limit = slot_time * head_state.slot_now; + (block_for_max_interval, time_to_reach_limit) + } + + // Denoting + // C: unfinalized_slack + // M: authoring_bias + // X: max_interval + // then the number of slots to reach the max interval can be computed from + // (start_slot + C) + M * sum(n, 1, X) + // or + // (start_slot + C) + M * X*(X+1)/2 + fn expected_time_to_reach_max_interval( + param: &BackoffAuthoringOnFinalizedHeadLagging + ) -> (u64, u64) { + let c = param.unfinalized_slack; + let m = param.authoring_bias; + let x = param.max_interval; + let slot_time = 6; + + let block_for_max_interval = x * m + c; + + // The 1 is because we start at slot_now = 1. 
+ let expected_number_of_slots = (1 + c) + m * x * (x + 1) / 2; + let time_to_reach = expected_number_of_slots * slot_time; + + (block_for_max_interval, time_to_reach) + } + + #[test] + fn time_to_reach_upper_bound_for_smaller_slack() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + let expected = expected_time_to_reach_max_interval(¶m); + let (block_for_max_interval, time_to_reach_limit) = run_until_max_interval(param); + assert_eq!((block_for_max_interval, time_to_reach_limit), expected); + // Note: 16 hours is 57600 sec + assert_eq!((block_for_max_interval, time_to_reach_limit), (205, 60636)); + } + + #[test] + fn time_to_reach_upper_bound_for_larger_slack() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 50, + authoring_bias: 2, + }; + let expected = expected_time_to_reach_max_interval(¶m); + let (block_for_max_interval, time_to_reach_limit) = run_until_max_interval(param); + assert_eq!((block_for_max_interval, time_to_reach_limit), expected); + assert_eq!((block_for_max_interval, time_to_reach_limit), (250, 60906)); + } } diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index e7c84a2c1fd28..d3bddccce0fad 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -1,24 +1,26 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Utility stream for yielding slots in a loop. //! //! This is used instead of `futures_timer::Interval` because it was unreliable. -use super::SlotCompatible; +use super::{SlotCompatible, Slot}; use sp_consensus::Error; use futures::{prelude::*, task::Context, task::Poll}; use sp_inherents::{InherentData, InherentDataProviders}; @@ -46,7 +48,7 @@ pub fn time_until_next(now: Duration, slot_duration: u64) -> Duration { /// Information about a slot. pub struct SlotInfo { /// The slot number. - pub number: u64, + pub slot: Slot, /// Current timestamp. pub timestamp: u64, /// The instant at which the slot ends. @@ -59,7 +61,7 @@ pub struct SlotInfo { /// A stream that returns every time there is a new slot. 
pub(crate) struct Slots { - last_slot: u64, + last_slot: Slot, slot_duration: u64, inner_delay: Option, inherent_data_providers: InherentDataProviders, @@ -74,7 +76,7 @@ impl Slots { timestamp_extractor: SC, ) -> Self { Slots { - last_slot: 0, + last_slot: 0.into(), slot_duration, inner_delay: None, inherent_data_providers, @@ -112,7 +114,7 @@ impl Stream for Slots { Err(err) => return Poll::Ready(Some(Err(sp_consensus::Error::InherentData(err)))), }; let result = self.timestamp_extractor.extract_timestamp_and_slot(&inherent_data); - let (timestamp, slot_num, offset) = match result { + let (timestamp, slot, offset) = match result { Ok(v) => v, Err(err) => return Poll::Ready(Some(Err(err))), }; @@ -123,11 +125,11 @@ impl Stream for Slots { self.inner_delay = Some(Delay::new(ends_in)); // never yield the same slot twice. - if slot_num > self.last_slot { - self.last_slot = slot_num; + if slot > self.last_slot { + self.last_slot = slot; break Poll::Ready(Some(Ok(SlotInfo { - number: slot_num, + slot, duration: self.slot_duration, timestamp, ends_at, diff --git a/client/consensus/uncles/Cargo.toml b/client/consensus/uncles/Cargo.toml index bb23c829a6e09..14a8c850562cb 100644 --- a/client/consensus/uncles/Cargo.toml +++ b/client/consensus/uncles/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-uncles" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Generic uncle inclusion utilities for consensus" edition = "2018" @@ -13,10 +13,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-authorship = { version = "2.0.0", path = "../../../primitives/authorship" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sc-client-api = { version = "3.0.0", path = "../../api" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-authorship = { version = "3.0.0", path = "../../../primitives/authorship" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } log = "0.4.8" diff --git a/client/consensus/uncles/src/lib.rs b/client/consensus/uncles/src/lib.rs index 2a129b200063b..f38849300d0da 100644 --- a/client/consensus/uncles/src/lib.rs +++ b/client/consensus/uncles/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Uncles functionality for Substrate. #![forbid(unsafe_code, missing_docs)] diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 70a0b19532593..72c26fead1c1c 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client-db" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,37 +13,37 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parking_lot = "0.10.0" +parking_lot = "0.11.1" log = "0.4.8" -kvdb = "0.7.0" -kvdb-rocksdb = { version = "0.9.1", optional = true } -kvdb-memorydb = "0.7.0" +kvdb = "0.9.0" +kvdb-rocksdb = { version = "0.11.0", optional = true } +kvdb-memorydb = "0.9.0" linked-hash-map = "0.5.2" hash-db = "0.15.2" -parity-util-mem = { version = "0.7.0", default-features = false, features = ["std"] } -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["std"] } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } blake2-rfc = "0.2.18" -sc-client-api = { version = "2.0.0", path = "../api" } -sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sc-executor = { version = "0.8.0", path = "../executor" } -sc-state-db = { version = "0.8.0", path = "../state-db" } -sp-trie = { version = "2.0.0", path = "../../primitives/trie" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-database = { version = "2.0.0", path = "../../primitives/database" } -parity-db = { version = "0.1.2", optional = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0", path = "../../utils/prometheus" } +sc-client-api = { version = "3.0.0", path = "../api" } +sp-arithmetic = { version = "3.0.0", path = "../../primitives/arithmetic" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } +sc-executor = { version = "0.9.0", path = "../executor" } +sc-state-db = { version = "0.9.0", path = "../state-db" } +sp-trie = { version = "3.0.0", path = "../../primitives/trie" } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-database = { version = "3.0.0", path = "../../primitives/database" } +parity-db = { version = "0.2.2", optional = true } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } [dev-dependencies] -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +sp-keyring = { version = "3.0.0", path = 
"../../primitives/keyring" } +sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -quickcheck = "0.9" -kvdb-rocksdb = "0.9.1" +quickcheck = "1.0.3" +kvdb-rocksdb = "0.11.0" tempfile = "3" [features] diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index f3c8f1aff9e14..f0c187bd379f1 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -350,13 +350,13 @@ impl StateBackend> for BenchmarkingState { } } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { if let Some(ref state) = *self.state.borrow() { - state.for_keys_in_child_storage(child_info, f) + state.apply_to_child_keys_while(child_info, f) } } diff --git a/client/db/src/cache/list_cache.rs b/client/db/src/cache/list_cache.rs index 15ad339b1f2c1..341105b16a5b3 100644 --- a/client/db/src/cache/list_cache.rs +++ b/client/db/src/cache/list_cache.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/db/src/cache/list_entry.rs b/client/db/src/cache/list_entry.rs index d14fab9274ccb..94d4eb9f49b27 100644 --- a/client/db/src/cache/list_entry.rs +++ b/client/db/src/cache/list_entry.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/db/src/cache/list_storage.rs b/client/db/src/cache/list_storage.rs index 377d744effa60..e4b3677b4ab31 100644 --- a/client/db/src/cache/list_storage.rs +++ b/client/db/src/cache/list_storage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/db/src/cache/mod.rs b/client/db/src/cache/mod.rs index 5501f0f1864c1..005d25b90f933 100644 --- a/client/db/src/cache/mod.rs +++ b/client/db/src/cache/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index a2299a82337a0..6233eab3ea396 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -1,18 +1,20 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
-// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! DB-backed changes tries storage. diff --git a/client/db/src/children.rs b/client/db/src/children.rs index bfba797cd467b..62352e6d0614a 100644 --- a/client/db/src/children.rs +++ b/client/db/src/children.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Functionality for reading and storing children hashes from db. diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 8196a750557a8..6654083939dae 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -49,6 +49,9 @@ use std::sync::Arc; use std::path::{Path, PathBuf}; use std::io; use std::collections::{HashMap, HashSet}; +use parking_lot::{Mutex, RwLock}; +use linked_hash_map::LinkedHashMap; +use log::{trace, debug, warn}; use sc_client_api::{ UsageInfo, MemoryInfo, IoInfo, MemorySize, @@ -63,9 +66,8 @@ use codec::{Decode, Encode}; use hash_db::Prefix; use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use sp_database::Transaction; -use parking_lot::RwLock; -use sp_core::ChangesTrieConfiguration; -use sp_core::offchain::storage::{OffchainOverlayedChange, OffchainOverlayedChanges}; +use sp_core::{Hasher, ChangesTrieConfiguration}; +use sp_core::offchain::OffchainOverlayedChange; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_arithmetic::traits::Saturating; use sp_runtime::{generic::{DigestItem, BlockId}, Justification, Storage}; @@ -74,7 +76,7 @@ use sp_runtime::traits::{ }; use sp_state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, - StorageCollection, ChildStorageCollection, + StorageCollection, ChildStorageCollection, OffchainChangesCollection, backend::Backend as StateBackend, StateMachineStats, }; use crate::utils::{DatabaseType, Meta, meta_keys, read_db, read_meta}; @@ -83,7 +85,6 @@ use sc_state_db::StateDb; use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache}; use crate::storage_cache::{CachingState, SyncingCachingState, SharedCache, new_shared_cache}; use crate::stats::StateUsageStats; -use log::{trace, debug, warn}; // Re-export the Database trait so that one can pass an implementation of it. pub use sp_database::Database; @@ -93,6 +94,7 @@ pub use sc_state_db::PruningMode; pub use bench::BenchmarkingState; const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u32 = 32768; +const CACHE_HEADERS: usize = 8; /// Default value for storage cache child ratio. const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); @@ -193,12 +195,12 @@ impl StateBackend> for RefTrackingState { self.state.for_key_values_with_prefix(prefix, f) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(child_info, f) + self.state.apply_to_child_keys_while(child_info, f) } fn for_child_keys_with_prefix( @@ -262,10 +264,33 @@ pub struct DatabaseSettings { pub state_cache_size: usize, /// Ratio of cache size dedicated to child tries. pub state_cache_child_ratio: Option<(usize, usize)>, - /// Pruning mode. - pub pruning: PruningMode, + /// State pruning mode. + pub state_pruning: PruningMode, /// Where to find the database. pub source: DatabaseSettingsSrc, + /// Block pruning mode. + pub keep_blocks: KeepBlocks, + /// Block body/Transaction storage scheme. + pub transaction_storage: TransactionStorageMode, +} + +/// Block pruning settings. +#[derive(Debug, Clone, Copy)] +pub enum KeepBlocks { + /// Keep full block history. + All, + /// Keep N recent finalized blocks. + Some(u32), +} + +/// Block body storage scheme. +#[derive(Debug, Clone, Copy)] +pub enum TransactionStorageMode { + /// Store block body as an encoded list of full transactions in the BODY column + BlockBody, + /// Store a list of hashes in the BODY column and each transaction individually + /// in the TRANSACTION column. + StorageChain, } /// Where to find the database.. 
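For orientation, a minimal sketch of how the renamed state_pruning field and the new keep_blocks and transaction_storage fields fit together. It mirrors the test helper further down in this diff; the concrete numbers (256) are illustrative only, and the snippet assumes the memory-db test facilities are available:

let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS);
let db = sp_database::as_database(db);
let settings = DatabaseSettings {
	state_cache_size: 16777216,
	state_cache_child_ratio: Some((50, 100)),
	// State pruning, as before (field renamed from `pruning`).
	state_pruning: PruningMode::keep_blocks(256),
	source: DatabaseSettingsSrc::Custom(db),
	// Keep the bodies of the 256 most recent finalized blocks, prune older ones.
	keep_blocks: KeepBlocks::Some(256),
	// Store each block body as an encoded list of full transactions
	// (the behaviour prior to this change).
	transaction_storage: TransactionStorageMode::BlockBody,
};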
@@ -332,6 +357,8 @@ pub(crate) mod columns { /// Offchain workers local storage pub const OFFCHAIN: u32 = 9; pub const CACHE: u32 = 10; + /// Transactions + pub const TRANSACTION: u32 = 11; } struct PendingBlock { @@ -352,16 +379,32 @@ impl<'a> sc_state_db::MetaDb for StateMetaDb<'a> { } } +fn cache_header( + cache: &mut LinkedHashMap>, + hash: Hash, + header: Option
<Header>
, +) { + cache.insert(hash, header); + while cache.len() > CACHE_HEADERS { + cache.pop_front(); + } +} + /// Block database pub struct BlockchainDb { db: Arc>, meta: Arc, Block::Hash>>>, leaves: RwLock>>, header_metadata_cache: Arc>, + header_cache: Mutex>>, + transaction_storage: TransactionStorageMode, } impl BlockchainDb { - fn new(db: Arc>) -> ClientResult { + fn new( + db: Arc>, + transaction_storage: TransactionStorageMode + ) -> ClientResult { let meta = read_meta::(&*db, columns::HEADER)?; let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?; Ok(BlockchainDb { @@ -369,6 +412,8 @@ impl BlockchainDb { leaves: RwLock::new(leaves), meta: Arc::new(RwLock::new(meta)), header_metadata_cache: Arc::new(HeaderMetadataCache::default()), + header_cache: Default::default(), + transaction_storage, }) } @@ -407,7 +452,20 @@ impl BlockchainDb { impl sc_client_api::blockchain::HeaderBackend for BlockchainDb { fn header(&self, id: BlockId) -> ClientResult> { - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) + match &id { + BlockId::Hash(h) => { + let mut cache = self.header_cache.lock(); + if let Some(result) = cache.get_refresh(h) { + return Ok(result.clone()); + } + let header = utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?; + cache_header(&mut cache, h.clone(), header.clone()); + Ok(header) + } + BlockId::Number(_) => { + utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) + } + } } fn info(&self) -> sc_client_api::blockchain::Info { @@ -424,12 +482,7 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha fn status(&self, id: BlockId) -> ClientResult { let exists = match id { - BlockId::Hash(_) => read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::HEADER, - id - )?.is_some(), + BlockId::Hash(_) => self.header(id)?.is_some(), BlockId::Number(n) => n <= self.meta.read().best_number, }; match exists { @@ -453,11 +506,30 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha impl sc_client_api::blockchain::Backend for BlockchainDb { fn body(&self, id: BlockId) -> ClientResult>> { match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? 
{ - Some(body) => match Decode::decode(&mut &body[..]) { - Ok(body) => Ok(Some(body)), - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding body: {}", err) - )), + Some(body) => { + match self.transaction_storage { + TransactionStorageMode::BlockBody => match Decode::decode(&mut &body[..]) { + Ok(body) => Ok(Some(body)), + Err(err) => return Err(sp_blockchain::Error::Backend( + format!("Error decoding body: {}", err) + )), + }, + TransactionStorageMode::StorageChain => { + match Vec::::decode(&mut &body[..]) { + Ok(hashes) => { + let extrinsics: ClientResult> = hashes.into_iter().map( + |h| self.extrinsic(&h).and_then(|maybe_ex| maybe_ex.ok_or_else( + || sp_blockchain::Error::Backend( + format!("Missing transaction: {}", h)))) + ).collect(); + Ok(Some(extrinsics?)) + } + Err(err) => return Err(sp_blockchain::Error::Backend( + format!("Error decoding body list: {}", err) + )), + } + } + } } None => Ok(None), } @@ -490,6 +562,24 @@ impl sc_client_api::blockchain::Backend for BlockchainDb ClientResult> { children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash) } + + fn extrinsic(&self, hash: &Block::Hash) -> ClientResult> { + match self.db.get(columns::TRANSACTION, hash.as_ref()) { + Some(ex) => { + match Decode::decode(&mut &ex[..]) { + Ok(ex) => Ok(Some(ex)), + Err(err) => Err(sp_blockchain::Error::Backend( + format!("Error decoding extrinsic {}: {}", hash, err) + )), + } + }, + None => Ok(None), + } + } + + fn have_extrinsic(&self, hash: &Block::Hash) -> ClientResult { + Ok(self.db.contains(columns::TRANSACTION, hash.as_ref())) + } } impl sc_client_api::blockchain::ProvideCache for BlockchainDb { @@ -581,7 +671,7 @@ pub struct BlockImportOperation { db_updates: PrefixedMemoryDB>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, - offchain_storage_updates: OffchainOverlayedChanges, + offchain_storage_updates: OffchainChangesCollection, changes_trie_updates: MemoryDB>, changes_trie_build_cache_update: Option>>, changes_trie_config_update: Option>, @@ -594,15 +684,13 @@ pub struct BlockImportOperation { impl BlockImportOperation { fn apply_offchain(&mut self, transaction: &mut Transaction) { - for ((prefix, key), value_operation) in self.offchain_storage_updates.drain() { - let key: Vec = prefix - .into_iter() - .chain(sp_core::sp_std::iter::once(b'/')) - .chain(key.into_iter()) - .collect(); + for ((prefix, key), value_operation) in self.offchain_storage_updates.drain(..) 
{ + let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key); match value_operation { - OffchainOverlayedChange::SetValue(val) => transaction.set_from_vec(columns::OFFCHAIN, &key, val), - OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key), + OffchainOverlayedChange::SetValue(val) => + transaction.set_from_vec(columns::OFFCHAIN, &key, val), + OffchainOverlayedChange::Remove => + transaction.remove(columns::OFFCHAIN, &key), } } } @@ -714,7 +802,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc fn update_offchain_storage( &mut self, - offchain_update: OffchainOverlayedChanges, + offchain_update: OffchainChangesCollection, ) -> ClientResult<()> { self.offchain_storage_updates = offchain_update; Ok(()) @@ -832,6 +920,8 @@ pub struct Backend { shared_cache: SharedCache, import_lock: Arc>, is_archive: bool, + keep_blocks: KeepBlocks, + transaction_storage: TransactionStorageMode, io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, state_usage: Arc, } @@ -848,13 +938,29 @@ impl Backend { /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self { + Self::new_test_with_tx_storage( + keep_blocks, + canonicalization_delay, + TransactionStorageMode::BlockBody, + ) + } + + /// Create new memory-backed client backend for tests. + #[cfg(any(test, feature = "test-helpers"))] + fn new_test_with_tx_storage( + keep_blocks: u32, + canonicalization_delay: u64, + transaction_storage: TransactionStorageMode, + ) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); let db_setting = DatabaseSettings { state_cache_size: 16777216, state_cache_child_ratio: Some((50, 100)), - pruning: PruningMode::keep_blocks(keep_blocks), + state_pruning: PruningMode::keep_blocks(keep_blocks), source: DatabaseSettingsSrc::Custom(db), + keep_blocks: KeepBlocks::Some(keep_blocks), + transaction_storage, }; Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") @@ -865,14 +971,12 @@ impl Backend { canonicalization_delay: u64, config: &DatabaseSettings, ) -> ClientResult { - let is_archive_pruning = config.pruning.is_archive(); - let blockchain = BlockchainDb::new(db.clone())?; + let is_archive_pruning = config.state_pruning.is_archive(); + let blockchain = BlockchainDb::new(db.clone(), config.transaction_storage.clone())?; let meta = blockchain.meta.clone(); - let map_e = |e: sc_state_db::Error| sp_blockchain::Error::from( - format!("State database error: {:?}", e) - ); + let map_e = |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e); let state_db: StateDb<_, _> = StateDb::new( - config.pruning.clone(), + config.state_pruning.clone(), !config.source.supports_ref_counting(), &StateMetaDb(&*db), ).map_err(map_e)?; @@ -912,6 +1016,8 @@ impl Backend { is_archive: is_archive_pruning, io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), state_usage: Arc::new(StateUsageStats::new()), + keep_blocks: config.keep_blocks.clone(), + transaction_storage: config.transaction_storage.clone(), }) } @@ -1059,7 +1165,7 @@ impl Backend { trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); let commit = self.storage.state_db.canonicalize_block(&hash) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; + .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; 
apply_state_commit(transaction, commit); }; @@ -1117,15 +1223,23 @@ impl Backend { hash, )?; - let header_metadata = CachedHeaderMetadata::from(&pending_block.header); - self.blockchain.insert_header_metadata( - header_metadata.hash, - header_metadata, - ); - transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode()); if let Some(body) = &pending_block.body { - transaction.set_from_vec(columns::BODY, &lookup_key, body.encode()); + match self.transaction_storage { + TransactionStorageMode::BlockBody => { + transaction.set_from_vec(columns::BODY, &lookup_key, body.encode()); + }, + TransactionStorageMode::StorageChain => { + let mut hashes = Vec::with_capacity(body.len()); + for extrinsic in body { + let extrinsic = extrinsic.encode(); + let hash = HashFor::::hash(&extrinsic); + transaction.set(columns::TRANSACTION, &hash.as_ref(), &extrinsic); + hashes.push(hash); + } + transaction.set_from_vec(columns::BODY, &lookup_key, hashes.encode()); + }, + } } if let Some(justification) = pending_block.justification { transaction.set_from_vec(columns::JUSTIFICATION, &lookup_key, justification.encode()); @@ -1195,9 +1309,7 @@ impl Backend { number_u64, &pending_block.header.parent_hash(), changeset, - ).map_err(|e: sc_state_db::Error| - sp_blockchain::Error::from(format!("State database error: {:?}", e)) - )?; + ).map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; apply_state_commit(&mut transaction, commit); // Check if need to finalize. Genesis is always finalized instantly. @@ -1271,7 +1383,7 @@ impl Backend { meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); - Some((number, hash, enacted, retracted, displaced_leaf, is_best, cache)) + Some((pending_block.header, number, hash, enacted, retracted, displaced_leaf, is_best, cache)) } else { None }; @@ -1297,7 +1409,11 @@ impl Backend { self.storage.db.commit(transaction)?; + // Apply all in-memory state shanges. + // Code beyond this point can't fail. + if let Some(( + header, number, hash, enacted, @@ -1306,6 +1422,12 @@ impl Backend { is_best, mut cache, )) = imported { + let header_metadata = CachedHeaderMetadata::from(&header); + self.blockchain.insert_header_metadata( + header_metadata.hash, + header_metadata, + ); + cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header)); cache.sync_cache( &enacted, &retracted, @@ -1352,7 +1474,7 @@ impl Backend { transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); let commit = self.storage.state_db.canonicalize_block(&f_hash) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; + .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; apply_state_commit(transaction, commit); if !f_num.is_zero() { @@ -1368,6 +1490,7 @@ impl Backend { } } + self.prune_blocks(transaction, f_num)?; let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); match displaced { x @ &mut None => *x = Some(new_displaced), @@ -1376,6 +1499,50 @@ impl Backend { Ok(()) } + + fn prune_blocks( + &self, + transaction: &mut Transaction, + finalized: NumberFor, + ) -> ClientResult<()> { + if let KeepBlocks::Some(keep_blocks) = self.keep_blocks { + // Always keep the last finalized block + let keep = std::cmp::max(keep_blocks, 1); + if finalized < keep.into() { + return Ok(()) + } + let number = finalized.saturating_sub(keep.into()); + match read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY, BlockId::::number(number))? 
{ + Some(body) => { + debug!(target: "db", "Removing block #{}", number); + utils::remove_from_db( + transaction, + &*self.storage.db, + columns::KEY_LOOKUP, + columns::BODY, + BlockId::::number(number), + )?; + match self.transaction_storage { + TransactionStorageMode::BlockBody => {}, + TransactionStorageMode::StorageChain => { + match Vec::::decode(&mut &body[..]) { + Ok(hashes) => { + for h in hashes { + transaction.remove(columns::TRANSACTION, h.as_ref()); + } + } + Err(err) => return Err(sp_blockchain::Error::Backend( + format!("Error decoding body list: {}", err) + )), + } + } + } + } + None => return Ok(()), + } + } + Ok(()) + } } fn apply_state_commit(transaction: &mut Transaction, commit: sc_state_db::CommitSet>) { @@ -1781,6 +1948,17 @@ pub(crate) mod tests { parent_hash: H256, changes: Option, Vec)>>, extrinsics_root: H256, + ) -> H256 { + insert_block(backend, number, parent_hash, changes, extrinsics_root, Vec::new()) + } + + pub fn insert_block( + backend: &Backend, + number: u64, + parent_hash: H256, + changes: Option, Vec)>>, + extrinsics_root: H256, + body: Vec>, ) -> H256 { use sp_runtime::testing::Digest; @@ -1807,7 +1985,7 @@ pub(crate) mod tests { }; let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, block_id).unwrap(); - op.set_block_data(header, Some(Vec::new()), None, NewBlockState::Best).unwrap(); + op.set_block_data(header, Some(body), None, NewBlockState::Best).unwrap(); op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap(); backend.commit_operation(op).unwrap(); @@ -1859,8 +2037,10 @@ pub(crate) mod tests { let backend = Backend::::new(DatabaseSettings { state_cache_size: 16777216, state_cache_child_ratio: Some((50, 100)), - pruning: PruningMode::keep_blocks(1), + state_pruning: PruningMode::keep_blocks(1), source: DatabaseSettingsSrc::Custom(backing), + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, }, 0).unwrap(); assert_eq!(backend.blockchain().info().best_number, 9); for i in 0..10 { @@ -2404,4 +2584,33 @@ pub(crate) mod tests { assert_eq!(cht_root_1, cht_root_2); assert_eq!(cht_root_2, cht_root_3); } + + #[test] + fn prune_blocks_on_finalize() { + for storage in &[TransactionStorageMode::BlockBody, TransactionStorageMode::StorageChain] { + let backend = Backend::::new_test_with_tx_storage(2, 0, *storage); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0 .. 5 { + let hash = insert_block(&backend, i, prev_hash, None, Default::default(), vec![i.into()]); + blocks.push(hash); + prev_hash = hash; + } + + { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + for i in 1 .. 5 { + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + } + backend.commit_operation(op).unwrap(); + } + let bc = backend.blockchain(); + assert_eq!(None, bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + } + } } diff --git a/client/db/src/light.rs b/client/db/src/light.rs index acfb6217ce9e0..91f37dd374d9f 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/db/src/offchain.rs b/client/db/src/offchain.rs index c4f0ce115ca54..df45c4946e622 100644 --- a/client/db/src/offchain.rs +++ b/client/db/src/offchain.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,10 +18,7 @@ //! RocksDB-based offchain workers local storage. -use std::{ - collections::HashMap, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; use crate::{columns, Database, DbHash, Transaction}; use parking_lot::Mutex; @@ -43,7 +40,7 @@ impl std::fmt::Debug for LocalStorage { impl LocalStorage { /// Create new offchain storage for tests (backed by memorydb) - #[cfg(any(test, feature = "test-helpers"))] + #[cfg(any(feature = "test-helpers", test))] pub fn new_test() -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); @@ -61,9 +58,8 @@ impl LocalStorage { impl sp_core::offchain::OffchainStorage for LocalStorage { fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { - let key: Vec = prefix.iter().chain(key).cloned().collect(); let mut tx = Transaction::new(); - tx.set(columns::OFFCHAIN, &key, value); + tx.set(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key), value); if let Err(err) = self.db.commit(tx) { error!("Error setting on local storage: {}", err) @@ -71,9 +67,8 @@ impl sp_core::offchain::OffchainStorage for LocalStorage { } fn remove(&mut self, prefix: &[u8], key: &[u8]) { - let key: Vec = prefix.iter().chain(key).cloned().collect(); let mut tx = Transaction::new(); - tx.remove(columns::OFFCHAIN, &key); + tx.remove(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key)); if let Err(err) = self.db.commit(tx) { error!("Error removing on local storage: {}", err) @@ -81,8 +76,7 @@ impl sp_core::offchain::OffchainStorage for LocalStorage { } fn get(&self, prefix: &[u8], key: &[u8]) -> Option> { - let key: Vec = prefix.iter().chain(key).cloned().collect(); - self.db.get(columns::OFFCHAIN, &key) + self.db.get(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key)) } fn compare_and_set( @@ -92,7 +86,7 @@ impl sp_core::offchain::OffchainStorage for LocalStorage { old_value: Option<&[u8]>, new_value: &[u8], ) -> bool { - let key: Vec = prefix.iter().chain(item_key).cloned().collect(); + let key = concatenate_prefix_and_key(prefix, item_key); let key_lock = { let mut locks = self.locks.lock(); locks.entry(key.clone()).or_default().clone() @@ -122,6 +116,15 @@ impl sp_core::offchain::OffchainStorage for LocalStorage { } } +/// Concatenate the prefix and key to create an offchain key in the db. +pub(crate) fn concatenate_prefix_and_key(prefix: &[u8], key: &[u8]) -> Vec { + prefix + .iter() + .chain(key.into_iter()) + .cloned() + .collect() +} + #[cfg(test)] mod tests { use super::*; diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs index 313069706f33f..71cc5117f19ee 100644 --- a/client/db/src/parity_db.rs +++ b/client/db/src/parity_db.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -37,6 +37,7 @@ pub fn open(path: &std::path::Path, db_type: DatabaseType) -> parity_db::Result>> { let mut config = parity_db::Options::with_columns(path, NUM_COLUMNS as u8); + config.sync = true; // Flush each commit if db_type == DatabaseType::Full { let mut state_col = &mut config.columns[columns::STATE as usize]; state_col.ref_counted = true; diff --git a/client/db/src/stats.rs b/client/db/src/stats.rs index 8d208024b4bb2..3fd93db931d02 100644 --- a/client/db/src/stats.rs +++ b/client/db/src/stats.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 0b4b6d4f88ef5..2dde8d5058220 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -1,20 +1,24 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . -//! Global cache state. +//! Global state cache. Maintains recently queried/committed state values. +//! Tracks changes over the span of a few recent blocks and handles forks +//! by tracking/removing cache entries for conflicting changes. use std::collections::{VecDeque, HashSet, HashMap}; use std::sync::Arc; @@ -341,12 +345,27 @@ impl CacheChanges { ); let cache = &mut *cache; // Filter out committing block if any. - let enacted: Vec<_> = enacted + let mut enacted: Vec<_> = enacted .iter() .filter(|h| commit_hash.as_ref().map_or(true, |p| *h != p)) .cloned() .collect(); - cache.sync(&enacted, retracted); + + let mut retracted = std::borrow::Cow::Borrowed(retracted); + if let Some(commit_hash) = &commit_hash { + if let Some(m) = cache.modifications.iter_mut().find(|m| &m.hash == commit_hash) { + if m.is_canon != is_best { + // Same block committed twice with different state changes. + // Treat it as reenacted/retracted.
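 // In other words: if the block is now marked best, replay its changes into
 // the shared cache as part of enacted; otherwise add it to retracted so its
 // previously cached changes are dropped.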
+ if is_best { + enacted.push(commit_hash.clone()); + } else { + retracted.to_mut().push(commit_hash.clone()); + } + } + } + } + cache.sync(&enacted, &retracted); // Propagate cache only if committing on top of the latest canonical state // blocks are ordered by number and only one block with a given number is marked as canonical // (contributed to canonical state cache) @@ -584,12 +603,12 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.exists_child_storage(child_info, key) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(child_info, f) + self.state.apply_to_child_keys_while(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -766,12 +785,12 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().exists_child_storage(child_info, key) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - self.caching_state().for_keys_in_child_storage(child_info, f) + self.caching_state().apply_to_child_keys_while(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -1314,6 +1333,71 @@ mod tests { ); assert_eq!(s.storage(&key).unwrap(), None); } + + #[test] + fn same_block_no_changes() { + sp_tracing::try_init_simple(); + + let root_parent = H256::random(); + let key = H256::random()[..].to_vec(); + let h1 = H256::random(); + let h2 = H256::random(); + + let shared = new_shared_cache::(256*1024, (0,1)); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(root_parent), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![1]))], + vec![], + Some(h1), + Some(1), + true, + ); + assert_eq!(shared.lock().lru_storage.get(&key).unwrap(), &Some(vec![1])); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1), + ); + + // commit as non-best + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![2]))], + vec![], + Some(h2), + Some(2), + false, + ); + + assert_eq!(shared.lock().lru_storage.get(&key).unwrap(), &Some(vec![1])); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1), + ); + + // commit again as best with no changes + s.cache.sync_cache( + &[], + &[], + vec![], + vec![], + Some(h2), + Some(2), + true, + ); + assert_eq!(s.storage(&key).unwrap(), None); + } } #[cfg(test)] @@ -1387,50 +1471,46 @@ mod qc { } impl Arbitrary for Action { - fn arbitrary(gen: &mut G) -> Self { - let path = gen.next_u32() as u8; - let mut buf = [0u8; 32]; + fn arbitrary(gen: &mut quickcheck::Gen) -> Self { + let path = u8::arbitrary(gen); + let buf = (0..32).map(|_| u8::arbitrary(gen)).collect::>(); match path { 0..=175 => { - gen.fill_bytes(&mut buf[..]); Action::Next { - hash: H256::from(&buf), + hash: H256::from_slice(&buf[..]), changes: { let mut set = Vec::new(); - for _ in 0..gen.next_u32()/(64*256*256*256) { - set.push((vec![gen.next_u32() as u8], Some(vec![gen.next_u32() as u8]))); + for _ in 0..::arbitrary(gen)/(64*256*256*256) { + set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)]))); } set } } }, 176..=220 => { - gen.fill_bytes(&mut buf[..]); Action::Fork { - hash: H256::from(&buf), - depth: ((gen.next_u32() as u8) / 32) as usize, + hash: H256::from_slice(&buf[..]), + depth: ((u8::arbitrary(gen)) / 32) as usize, changes: { let mut set = Vec::new(); - for _ in 0..gen.next_u32()/(64*256*256*256) { - 
set.push((vec![gen.next_u32() as u8], Some(vec![gen.next_u32() as u8]))); + for _ in 0..::arbitrary(gen)/(64*256*256*256) { + set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)]))); } set } } }, 221..=240 => { - gen.fill_bytes(&mut buf[..]); Action::ReorgWithImport { - hash: H256::from(&buf), - depth: ((gen.next_u32() as u8) / 32) as usize, // 0-7 + hash: H256::from_slice(&buf[..]), + depth: ((u8::arbitrary(gen)) / 32) as usize, // 0-7 } }, _ => { - gen.fill_bytes(&mut buf[..]); Action::FinalizationReorg { - fork_depth: ((gen.next_u32() as u8) / 32) as usize, // 0-7 - depth: ((gen.next_u32() as u8) / 64) as usize, // 0-3 + fork_depth: ((u8::arbitrary(gen)) / 32) as usize, // 0-7 + depth: ((u8::arbitrary(gen)) / 64) as usize, // 0-3 } }, } diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 95592d071f777..b6e49edba1978 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Database upgrade logic. @@ -22,21 +24,26 @@ use std::path::{Path, PathBuf}; use sp_runtime::traits::Block as BlockT; use crate::utils::DatabaseType; +use kvdb_rocksdb::{Database, DatabaseConfig}; /// Version file name. const VERSION_FILE_NAME: &'static str = "db_version"; /// Current db version. -const CURRENT_VERSION: u32 = 1; +const CURRENT_VERSION: u32 = 2; + +/// Number of columns in v1. +const V1_NUM_COLUMNS: u32 = 11; /// Upgrade database to current version. 
-pub fn upgrade_db(db_path: &Path, _db_type: DatabaseType) -> sp_blockchain::Result<()> { +pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { let is_empty = db_path.read_dir().map_or(true, |mut d| d.next().is_none()); if !is_empty { let db_version = current_version(db_path)?; match db_version { 0 => Err(sp_blockchain::Error::Backend(format!("Unsupported database version: {}", db_version)))?, - 1 => (), + 1 => migrate_1_to_2::(db_path, db_type)?, + CURRENT_VERSION => (), _ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?, } } @@ -44,6 +51,16 @@ pub fn upgrade_db(db_path: &Path, _db_type: DatabaseType) -> sp_b update_version(db_path) } +/// Migration from version1 to version2: +/// 1) the number of columns has changed from 11 to 12; +/// 2) transactions column is added; +fn migrate_1_to_2(db_path: &Path, _db_type: DatabaseType) -> sp_blockchain::Result<()> { + let db_path = db_path.to_str() + .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; + let db_cfg = DatabaseConfig::with_columns(V1_NUM_COLUMNS); + let db = Database::open(&db_cfg, db_path).map_err(db_err)?; + db.add_column().map_err(db_err) +} /// Reads current database version from the file at given path. /// If the file does not exist returns 0. @@ -85,7 +102,7 @@ fn version_file_path(path: &Path) -> PathBuf { #[cfg(test)] mod tests { use sc_state_db::PruningMode; - use crate::{DatabaseSettings, DatabaseSettingsSrc}; + use crate::{DatabaseSettings, DatabaseSettingsSrc, KeepBlocks, TransactionStorageMode}; use crate::tests::Block; use super::*; @@ -101,8 +118,10 @@ mod tests { crate::utils::open_database::(&DatabaseSettings { state_cache_size: 0, state_cache_child_ratio: None, - pruning: PruningMode::ArchiveAll, + state_pruning: PruningMode::ArchiveAll, source: DatabaseSettingsSrc::RocksDb { path: db_path.to_owned(), cache_size: 128 }, + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, }, DatabaseType::Full).map(|_| ()) } @@ -120,4 +139,15 @@ mod tests { open_database(db_dir.path()).unwrap(); assert_eq!(current_version(db_dir.path()).unwrap(), CURRENT_VERSION); } + + #[test] + fn upgrade_from_1_to_2_works() { + for version_from_file in &[None, Some(1)] { + let db_dir = tempfile::TempDir::new().unwrap(); + let db_path = db_dir.path(); + create_db(db_path, *version_from_file); + open_database(db_path).unwrap(); + assert_eq!(current_version(db_path).unwrap(), CURRENT_VERSION); + } + } } diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index e999469c18ff0..cd9b2a6f56d41 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -37,7 +37,7 @@ use crate::{DatabaseSettings, DatabaseSettingsSrc, Database, DbHash}; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. #[cfg(any(feature = "with-kvdb-rocksdb", feature = "with-parity-db", feature = "test-helpers", test))] -pub const NUM_COLUMNS: u32 = 11; +pub const NUM_COLUMNS: u32 = 12; /// Meta column. The set of keys in the column is shared by full && light storages. 
pub const COLUMN_META: u32 = 0; @@ -327,6 +327,23 @@ pub fn read_db( }) } +/// Remove database column entry for the given block. +pub fn remove_from_db( + transaction: &mut Transaction, + db: &dyn Database, + col_index: u32, + col: u32, + id: BlockId, +) -> sp_blockchain::Result<()> +where + Block: BlockT, +{ + block_id_to_lookup_key(db, col_index, id).and_then(|key| match key { + Some(key) => Ok(transaction.remove(col, key.as_ref())), + None => Ok(()), + }) +} + /// Read a header from the database. pub fn read_header( db: &dyn Database, @@ -384,7 +401,13 @@ pub fn read_meta(db: &dyn Database, col_header: u32) -> Result< } { let hash = header.hash(); - debug!("DB Opened blockchain db, fetched {} = {:?} ({})", desc, hash, header.number()); + debug!( + target: "db", + "Opened blockchain db, fetched {} = {:?} ({})", + desc, + hash, + header.number() + ); Ok((hash, *header.number())) } else { Ok((genesis_hash.clone(), Zero::zero())) diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index b88e8926be141..e0b21b7fb6652 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -15,25 +15,25 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.3.4" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-tasks = { version = "2.0.0", path = "../../primitives/tasks" } -sp-trie = { version = "2.0.0", path = "../../primitives/trie" } -sp-serializer = { version = "2.0.0", path = "../../primitives/serializer" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } -sp-panic-handler = { version = "2.0.0", path = "../../primitives/panic-handler" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-tasks = { version = "3.0.0", path = "../../primitives/tasks" } +sp-trie = { version = "3.0.0", path = "../../primitives/trie" } +sp-serializer = { version = "3.0.0", path = "../../primitives/serializer" } +sp-version = { version = "3.0.0", path = "../../primitives/version" } +sp-panic-handler = { version = "3.0.0", path = "../../primitives/panic-handler" } wasmi = "0.6.2" parity-wasm = "0.41.0" lazy_static = "1.4.0" -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-wasm-interface = { version = "2.0.0", path = "../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../primitives/runtime-interface" } -sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } -sc-executor-common = { version = "0.8.0", path = "common" } -sc-executor-wasmi = { version = "0.8.0", path = "wasmi" } -sc-executor-wasmtime = { version = "0.8.0", path = "wasmtime", optional = true } -parking_lot = "0.10.0" +sp-api = { version = "3.0.0", path = "../../primitives/api" } +sp-wasm-interface = { version = "3.0.0", path = "../../primitives/wasm-interface" } +sp-runtime-interface = { version = "3.0.0", path = "../../primitives/runtime-interface" } +sp-externalities = { version = "0.9.0", path = "../../primitives/externalities" } +sc-executor-common = { version = "0.9.0", path = "common" } +sc-executor-wasmi = { version = "0.9.0", path 
= "wasmi" } +sc-executor-wasmtime = { version = "0.9.0", path = "wasmtime", optional = true } +parking_lot = "0.11.1" log = "0.4.8" libsecp256k1 = "0.3.4" @@ -43,13 +43,13 @@ wat = "1.0" hex-literal = "0.3.1" sc-runtime-test = { version = "2.0.0", path = "runtime-test" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -test-case = "0.3.3" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -sc-tracing = { version = "2.0.0", path = "../tracing" } -tracing = "0.1.19" -tracing-subscriber = "0.2.10" +sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } +sc-tracing = { version = "3.0.0", path = "../tracing" } +tracing = "0.1.22" +tracing-subscriber = "0.2.15" +paste = "1.0" [features] default = [ "std" ] diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 64ed23598f47c..7e13e37d33fbe 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-common" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,16 +14,15 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = "0.4.8" derive_more = "0.99.2" parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.3.4" } +codec = { package = "parity-scale-codec", version = "2.0.0" } wasmi = "0.6.2" -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } -sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } -sp-serializer = { version = "2.0.0", path = "../../../primitives/serializer" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-allocator = { version = "3.0.0", path = "../../../primitives/allocator" } +sp-wasm-interface = { version = "3.0.0", path = "../../../primitives/wasm-interface" } +sp-serializer = { version = "3.0.0", path = "../../../primitives/serializer" } +thiserror = "1.0.21" [features] default = [] diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index caed63c183e68..0af148fd95809 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,92 +25,89 @@ use wasmi; pub type Result = std::result::Result; /// Error type. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum Error { - /// Unserializable Data - InvalidData(sp_serializer::Error), - /// Trap occurred during execution - Trap(wasmi::Trap), - /// Wasmi loading/instantiating error - Wasmi(wasmi::Error), - /// Error in the API. Parameter is an error message. 
- #[from(ignore)] + #[error("Unserializable data encountered")] + InvalidData(#[from] sp_serializer::Error), + + #[error(transparent)] + Trap(#[from] wasmi::Trap), + + #[error(transparent)] + Wasmi(#[from] wasmi::Error), + + #[error("API Error: {0}")] ApiError(String), - /// Method is not found - #[display(fmt="Method not found: '{}'", _0)] - #[from(ignore)] + + #[error("Method not found: '{0}'")] MethodNotFound(String), - /// Code is invalid (expected single byte) - #[display(fmt="Invalid Code: {}", _0)] - #[from(ignore)] + + #[error("Invalid Code (expected single byte): '{0}'")] InvalidCode(String), - /// Could not get runtime version. - #[display(fmt="On-chain runtime does not specify version")] + + #[error("On-chain runtime does not specify version")] VersionInvalid, - /// Externalities have failed. - #[display(fmt="Externalities error")] + + #[error("Externalities error")] Externalities, - /// Invalid index. - #[display(fmt="Invalid index provided")] + + #[error("Invalid index provided")] InvalidIndex, - /// Invalid return type. - #[display(fmt="Invalid type returned (should be u64)")] + + #[error("Invalid type returned (should be u64)")] InvalidReturn, - /// Runtime failed. - #[display(fmt="Runtime error")] + + #[error("Runtime error")] Runtime, - /// Runtime panicked. - #[display(fmt="Runtime panicked: {}", _0)] - #[from(ignore)] + + #[error("Runtime panicked: {0}")] RuntimePanicked(String), - /// Invalid memory reference. - #[display(fmt="Invalid memory reference")] + + #[error("Invalid memory reference")] InvalidMemoryReference, - /// The runtime must provide a global named `__heap_base` of type i32 for specifying where the - /// allocator is allowed to place its data. - #[display(fmt="The runtime doesn't provide a global named `__heap_base`")] + + #[error("The runtime doesn't provide a global named `__heap_base` of type `i32`")] HeapBaseNotFoundOrInvalid, - /// The runtime WebAssembly module is not allowed to have the `start` function. - #[display(fmt="The runtime has the `start` function")] + + #[error("The runtime must not have the `start` function defined")] RuntimeHasStartFn, - /// Some other error occurred + + #[error("Other: {0}")] Other(String), - /// Some error occurred in the allocator - #[display(fmt="Error in allocator: {}", _0)] - Allocator(sp_allocator::Error), - /// Execution of a host function failed. - #[display(fmt="Host function {} execution failed with: {}", _0, _1)] + + #[error(transparent)] + Allocator(#[from] sp_allocator::Error), + + #[error("Host function {0} execution failed with: {1}")] FunctionExecution(String, String), - /// No table is present. - /// - /// Call was requested that requires table but none was present in the instance. - #[display(fmt="No table exported by wasm blob")] + + #[error("No table exported by wasm blob")] NoTable, - /// No table entry is present. - /// - /// Call was requested that requires specific entry in the table to be present. - #[display(fmt="No table entry with index {} in wasm blob exported table", _0)] - #[from(ignore)] + + #[error("No table entry with index {0} in wasm blob exported table")] NoTableEntryWithIndex(u32), - /// Table entry is not a function. - #[display(fmt="Table element with index {} is not a function in wasm blob exported table", _0)] - #[from(ignore)] + + #[error("Table element with index {0} is not a function in wasm blob exported table")] TableElementIsNotAFunction(u32), - /// Function in table is null and thus cannot be called. 
- #[display(fmt="Table entry with index {} in wasm blob is null", _0)] - #[from(ignore)] + + #[error("Table entry with index {0} in wasm blob is null")] FunctionRefIsNull(u32), -} -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::InvalidData(ref err) => Some(err), - Error::Trap(ref err) => Some(err), - Error::Wasmi(ref err) => Some(err), - _ => None, - } - } + #[error(transparent)] + RuntimeConstruction(#[from] WasmError), + + #[error("Shared memory is not supported")] + SharedMemUnsupported, + + #[error("Imported globals are not supported yet")] + ImportedGlobalsUnsupported, + + #[error("initializer expression can have only up to 2 expressions in wasm 1.0")] + InitializerHasTooManyExpressions, + + #[error("Invalid initializer expression provided {0}")] + InvalidInitializerExpression(String), } impl wasmi::HostError for Error {} @@ -121,9 +118,9 @@ impl From<&'static str> for Error { } } -impl From for Error { - fn from(err: WasmError) -> Error { - Error::Other(err.to_string()) +impl From for Error { + fn from(err: String) -> Error { + Error::Other(err) } } @@ -151,3 +148,5 @@ pub enum WasmError { /// Other error happenend. Other(String), } + +impl std::error::Error for WasmError {} diff --git a/client/executor/common/src/lib.rs b/client/executor/common/src/lib.rs index 7f3864e6152fb..050bad27d6c30 100644 --- a/client/executor/common/src/lib.rs +++ b/client/executor/common/src/lib.rs @@ -1,22 +1,25 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! A set of common definitions that are needed for defining execution engines. #![warn(missing_docs)] +#![deny(unused_crate_dependencies)] pub mod error; pub mod sandbox; diff --git a/client/executor/common/src/sandbox.rs b/client/executor/common/src/sandbox.rs index b2c35b7582718..8ed294bb83983 100644 --- a/client/executor/common/src/sandbox.rs +++ b/client/executor/common/src/sandbox.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/executor/common/src/util.rs b/client/executor/common/src/util.rs index 92a48e1401814..5947be4469cd0 100644 --- a/client/executor/common/src/util.rs +++ b/client/executor/common/src/util.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -87,15 +87,12 @@ impl DataSegmentsSnapshot { let init_expr = match segment.offset() { Some(offset) => offset.code(), // Return if the segment is passive - None => return Err(Error::from("Shared memory is not supported".to_string())), + None => return Err(Error::SharedMemUnsupported), }; // [op, End] if init_expr.len() != 2 { - return Err(Error::from( - "initializer expression can have only up to 2 expressions in wasm 1.0" - .to_string(), - )); + return Err(Error::InitializerHasTooManyExpressions); } let offset = match &init_expr[0] { Instruction::I32Const(v) => *v as u32, @@ -106,15 +103,10 @@ impl DataSegmentsSnapshot { // At the moment of writing the Substrate Runtime Interface does not provide // any globals. There is nothing that prevents us from supporting this // if/when we gain those. - return Err(Error::from( - "Imported globals are not supported yet".to_string(), - )); + return Err(Error::ImportedGlobalsUnsupported); } insn => { - return Err(Error::from(format!( - "{:?} is not supported as initializer expression in wasm 1.0", - insn - ))) + return Err(Error::InvalidInitializerExpression(format!("{:?}", insn))) } }; diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs index c407d9967cbf9..cca0d99c4b91c 100644 --- a/client/executor/common/src/wasm_runtime.rs +++ b/client/executor/common/src/wasm_runtime.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Definitions for a wasm runtime. 
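The conversion above replaces the `derive_more`-based error enum and its hand-written `std::error::Error::source` impl with `thiserror`, and it turns the stringly-typed failures in `util.rs` into dedicated variants (`SharedMemUnsupported`, `ImportedGlobalsUnsupported`, `InitializerHasTooManyExpressions`, `InvalidInitializerExpression`). A minimal sketch of the pattern, with illustrative names that are not part of the crate:

```rust
use thiserror::Error;

// `#[error(...)]` supplies the Display message, `#[error(transparent)]` forwards
// Display and `source()` to the wrapped error, and `#[from]` generates the `From`
// impl that the old code wrote (or derived) by hand.
#[derive(Debug, Error)]
enum SketchError {
    #[error("API Error: {0}")]
    Api(String),

    #[error(transparent)]
    Io(#[from] std::io::Error),
}

fn read_config(path: &str) -> Result<String, SketchError> {
    // `?` converts `std::io::Error` via the generated `From` impl.
    Ok(std::fs::read_to_string(path)?)
}
```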
diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index ba23e31febee5..93ad463be16c3 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -13,16 +13,16 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-allocator = { version = "2.0.0", default-features = false, path = "../../../primitives/allocator" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-io = { version = "2.0.0", default-features = false, path = "../../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-sandbox = { version = "0.8.0", default-features = false, path = "../../../primitives/sandbox" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-tasks = { version = "2.0.0", default-features = false, path = "../../../primitives/tasks" } +sp-allocator = { version = "3.0.0", default-features = false, path = "../../../primitives/allocator" } +sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } +sp-io = { version = "3.0.0", default-features = false, path = "../../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-sandbox = { version = "0.9.0", default-features = false, path = "../../../primitives/sandbox" } +sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } +sp-tasks = { version = "3.0.0", default-features = false, path = "../../../primitives/tasks" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "4.0.0", path = "../../../utils/wasm-builder" } [features] default = [ "std" ] diff --git a/client/executor/runtime-test/build.rs b/client/executor/runtime-test/build.rs index bc07db900c31e..9456d6bc90f4c 100644 --- a/client/executor/runtime-test/build.rs +++ b/client/executor/runtime-test/build.rs @@ -1,26 +1,27 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . 
-use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { // regular build WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build(); @@ -28,10 +29,9 @@ fn main() { // and building with tracing activated WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .set_file_name("wasm_binary_with_tracing.rs") - .append_to_rust_flags("--cfg feature=\\\"with-tracing\\\"") + .append_to_rust_flags(r#"--cfg feature="with-tracing""#) .build(); } diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 404530c1c3ebf..bfba4ef039395 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -4,8 +4,8 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ supported with the flag disabled.") @@ -261,6 +261,17 @@ sp_core::wasm_export_functions! { wasm_tracing::exit(span_id) } + fn test_nested_spans() { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + wasm_tracing::exit(span_id); + } + wasm_tracing::exit(span_id); + } + fn returns_mutable_static() -> u64 { unsafe { MUTABLE_STATIC += 1; diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index c8b763a6b1936..b28e3ca2436b6 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -26,7 +26,6 @@ use sp_core::{ }; use sc_runtime_test::wasm_binary_unwrap; use sp_state_machine::TestExternalities as CoreTestExternalities; -use test_case::test_case; use sp_trie::{TrieConfiguration, trie_types::Layout}; use sp_wasm_interface::HostFunctions as _; use sp_runtime::traits::BlakeTwo256; @@ -37,6 +36,34 @@ use crate::WasmExecutionMethod; pub type TestExternalities = CoreTestExternalities; type HostFunctions = sp_io::SubstrateHostFunctions; +/// Simple macro that runs a given method as test with the available wasm execution methods. +#[macro_export] +macro_rules! test_wasm_execution { + ($method_name:ident) => { + paste::item! { + #[test] + fn [<$method_name _interpreted>]() { + $method_name(WasmExecutionMethod::Interpreted); + } + + #[test] + #[cfg(feature = "wasmtime")] + fn [<$method_name _compiled>]() { + $method_name(WasmExecutionMethod::Compiled); + } + } + }; + + (interpreted_only $method_name:ident) => { + paste::item! 
{ + #[test] + fn [<$method_name _interpreted>]() { + $method_name(WasmExecutionMethod::Interpreted); + } + } + }; +} + fn call_in_wasm( function: &str, call_data: &[u8], @@ -48,6 +75,7 @@ fn call_in_wasm( Some(1024), HostFunctions::host_functions(), 8, + None, ); executor.call_in_wasm( &wasm_binary_unwrap()[..], @@ -59,8 +87,7 @@ fn call_in_wasm( ) } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(returning_should_work); fn returning_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -74,8 +101,7 @@ fn returning_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, vec![0u8; 0]); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(call_not_existing_function); fn call_not_existing_function(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -102,8 +128,7 @@ fn call_not_existing_function(wasm_method: WasmExecutionMethod) { } } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(call_yet_another_not_existing_function); fn call_yet_another_not_existing_function(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -130,8 +155,7 @@ fn call_yet_another_not_existing_function(wasm_method: WasmExecutionMethod) { } } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(panicking_should_work); fn panicking_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -161,8 +185,7 @@ fn panicking_should_work(wasm_method: WasmExecutionMethod) { assert!(output.is_err()); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(storage_should_work); fn storage_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); @@ -191,8 +214,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(ext, expected); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(clear_prefix_should_work); fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); { @@ -225,8 +247,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(expected, ext); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(blake2_256_should_work); fn blake2_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -250,8 +271,7 @@ fn blake2_256_should_work(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(blake2_128_should_work); fn blake2_128_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -275,8 +295,7 @@ fn blake2_128_should_work(wasm_method: 
WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(sha2_256_should_work); fn sha2_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -306,8 +325,7 @@ fn sha2_256_should_work(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(twox_256_should_work); fn twox_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -335,8 +353,7 @@ fn twox_256_should_work(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(twox_128_should_work); fn twox_128_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -360,8 +377,7 @@ fn twox_128_should_work(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(ed25519_verify_should_work); fn ed25519_verify_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -397,8 +413,7 @@ fn ed25519_verify_should_work(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(sr25519_verify_should_work); fn sr25519_verify_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -434,8 +449,7 @@ fn sr25519_verify_should_work(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(ordered_trie_root_should_work); fn ordered_trie_root_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let trie_input = vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]; @@ -450,8 +464,7 @@ fn ordered_trie_root_should_work(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(offchain_index); fn offchain_index(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let (offchain, _state) = testing::TestOffchainExt::new(); @@ -463,20 +476,15 @@ fn offchain_index(wasm_method: WasmExecutionMethod) { &mut ext.ext(), ).unwrap(); - use sp_core::offchain::storage::OffchainOverlayedChange; - assert_eq!( - ext.ext() - .get_offchain_storage_changes() - .get(sp_core::offchain::STORAGE_PREFIX, b"k"), - Some(OffchainOverlayedChange::SetValue(b"v".to_vec())) - ); + use sp_core::offchain::OffchainOverlayedChange; + let data = ext.overlayed_changes().clone().offchain_drain_committed().find(|(k, _v)| { + k == &(sp_core::offchain::STORAGE_PREFIX.to_vec(), b"k".to_vec()) + }); + assert_eq!(data.map(|data| data.1), Some(OffchainOverlayedChange::SetValue(b"v".to_vec()))); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] 
+test_wasm_execution!(offchain_local_storage_should_work); fn offchain_local_storage_should_work(wasm_method: WasmExecutionMethod) { - use sp_core::offchain::OffchainStorage; - let mut ext = TestExternalities::default(); let (offchain, state) = testing::TestOffchainExt::new(); ext.register_extension(OffchainExt::new(offchain)); @@ -489,11 +497,10 @@ fn offchain_local_storage_should_work(wasm_method: WasmExecutionMethod) { ).unwrap(), true.encode(), ); - assert_eq!(state.read().persistent_storage.get(b"", b"test"), Some(vec![])); + assert_eq!(state.read().persistent_storage.get(b"test"), Some(vec![])); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(offchain_http_should_work); fn offchain_http_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let (offchain, state) = testing::TestOffchainExt::new(); @@ -521,9 +528,7 @@ fn offchain_http_should_work(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] -#[should_panic(expected = "Allocator ran out of space")] +test_wasm_execution!(should_trap_when_heap_exhausted); fn should_trap_when_heap_exhausted(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); @@ -532,19 +537,22 @@ fn should_trap_when_heap_exhausted(wasm_method: WasmExecutionMethod) { Some(17), // `17` is the initial number of pages compiled into the binary. HostFunctions::host_functions(), 8, + None, ); - executor.call_in_wasm( + + let err = executor.call_in_wasm( &wasm_binary_unwrap()[..], None, "test_exhaust_heap", &[0], &mut ext.ext(), sp_core::traits::MissingHostFunctions::Allow, - ).unwrap(); + ).unwrap_err(); + + assert!(err.contains("Allocator ran out of space")); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(returns_mutable_static); fn returns_mutable_static(wasm_method: WasmExecutionMethod) { let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( wasm_method, @@ -552,6 +560,7 @@ fn returns_mutable_static(wasm_method: WasmExecutionMethod) { &wasm_binary_unwrap()[..], HostFunctions::host_functions(), true, + None, ).expect("Creates runtime"); let instance = runtime.new_instance().unwrap(); @@ -569,8 +578,7 @@ fn returns_mutable_static(wasm_method: WasmExecutionMethod) { // returned to its initial value and thus the stack space is going to be leaked. 
// // See https://github.com/paritytech/substrate/issues/2967 for details -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(restoration_of_globals); fn restoration_of_globals(wasm_method: WasmExecutionMethod) { // Allocate 32 pages (of 65536 bytes) which gives the runtime 2048KB of heap to operate on // (plus some additional space unused from the initial pages requested by the wasm runtime @@ -586,6 +594,7 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) { &wasm_binary_unwrap()[..], HostFunctions::host_functions(), true, + None, ).expect("Creates runtime"); let instance = runtime.new_instance().unwrap(); @@ -598,7 +607,7 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) { assert!(res.is_ok()); } -#[test_case(WasmExecutionMethod::Interpreted)] +test_wasm_execution!(interpreted_only heap_is_reset_between_calls); fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( wasm_method, @@ -606,6 +615,7 @@ fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { &wasm_binary_unwrap()[..], HostFunctions::host_functions(), true, + None, ).expect("Creates runtime"); let instance = runtime.new_instance().unwrap(); @@ -622,14 +632,14 @@ fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { instance.call_export("check_and_set_in_heap", ¶ms).unwrap(); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(parallel_execution); fn parallel_execution(wasm_method: WasmExecutionMethod) { let executor = std::sync::Arc::new(crate::WasmExecutor::new( wasm_method, Some(1024), HostFunctions::host_functions(), 8, + None, )); let code_hash = blake2_256(wasm_binary_unwrap()).to_vec(); let threads: Vec<_> = (0..8).map(|_| @@ -658,7 +668,7 @@ fn parallel_execution(wasm_method: WasmExecutionMethod) { } } -#[test_case(WasmExecutionMethod::Interpreted)] +test_wasm_execution!(wasm_tracing_should_work); fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { use std::sync::{Arc, Mutex}; @@ -719,12 +729,19 @@ fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(span_datum.target, "default"); assert_eq!(span_datum.name, ""); assert_eq!(values.bool_values.get("wasm").unwrap(), &true); + + call_in_wasm( + "test_nested_spans", + Default::default(), + wasm_method, + &mut ext, + ).unwrap(); + let len = traces.lock().unwrap().len(); + assert_eq!(len, 2); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(spawning_runtime_instance_should_work); fn spawning_runtime_instance_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -736,10 +753,8 @@ fn spawning_runtime_instance_should_work(wasm_method: WasmExecutionMethod) { ).unwrap(); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(spawning_runtime_instance_nested_should_work); fn spawning_runtime_instance_nested_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -751,10 +766,8 @@ fn spawning_runtime_instance_nested_should_work(wasm_method: WasmExecutionMethod ).unwrap(); } 
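For readers unfamiliar with `paste`, the `test_wasm_execution!` macro introduced above replaces the per-test `#[test_case(...)]` attributes by generating one `#[test]` function per execution method. Roughly, `test_wasm_execution!(returning_should_work)` expands to the following (names per the macro definition above):

```rust
#[test]
fn returning_should_work_interpreted() {
    returning_should_work(WasmExecutionMethod::Interpreted);
}

#[test]
#[cfg(feature = "wasmtime")]
fn returning_should_work_compiled() {
    returning_should_work(WasmExecutionMethod::Compiled);
}
```

The `interpreted_only` form emits only the first function, as used for `heap_is_reset_between_calls`.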
-#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(panic_in_spawned_instance_panics_on_joining_its_result); fn panic_in_spawned_instance_panics_on_joining_its_result(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); let mut ext = ext.ext(); diff --git a/client/executor/src/integration_tests/sandbox.rs b/client/executor/src/integration_tests/sandbox.rs index 447e395c2fb08..7ce9c94a2db8a 100644 --- a/client/executor/src/integration_tests/sandbox.rs +++ b/client/executor/src/integration_tests/sandbox.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,12 +18,11 @@ use super::{TestExternalities, call_in_wasm}; use crate::WasmExecutionMethod; +use crate::test_wasm_execution; use codec::Encode; -use test_case::test_case; -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(sandbox_should_work); fn sandbox_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -60,8 +59,7 @@ fn sandbox_should_work(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(sandbox_trap); fn sandbox_trap(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -87,8 +85,7 @@ fn sandbox_trap(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(start_called); fn start_called(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -131,8 +128,7 @@ fn start_called(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(invoke_args); fn invoke_args(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -171,8 +167,7 @@ fn invoke_args(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(return_val); fn return_val(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -199,8 +194,7 @@ fn return_val(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(unlinkable_module); fn unlinkable_module(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -225,8 +219,7 @@ fn unlinkable_module(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(corrupted_module); fn corrupted_module(wasm_method: WasmExecutionMethod) { let mut ext = 
TestExternalities::default(); let mut ext = ext.ext(); @@ -245,8 +238,7 @@ fn corrupted_module(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(start_fn_ok); fn start_fn_ok(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -274,8 +266,7 @@ fn start_fn_ok(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(start_fn_traps); fn start_fn_traps(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -304,8 +295,7 @@ fn start_fn_traps(wasm_method: WasmExecutionMethod) { ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(get_global_val_works); fn get_global_val_works(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); diff --git a/client/executor/src/lib.rs b/client/executor/src/lib.rs index 56a81b24b4076..c30015a86b20e 100644 --- a/client/executor/src/lib.rs +++ b/client/executor/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -80,6 +80,7 @@ mod tests { Some(8), sp_io::SubstrateHostFunctions::host_functions(), 8, + None, ); let res = executor.call_in_wasm( &wasm_binary_unwrap()[..], diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 1da82313a2df9..cdfe349edabd8 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -26,6 +26,7 @@ use std::{ panic::{UnwindSafe, AssertUnwindSafe}, result, sync::{Arc, atomic::{AtomicU64, Ordering}, mpsc}, + path::PathBuf, }; use sp_version::{NativeVersion, RuntimeVersion}; @@ -102,6 +103,9 @@ pub struct WasmExecutor { cache: Arc, /// The size of the instances cache. max_runtime_instances: usize, + /// The path to a directory which the executor can leverage for a file cache, e.g. put there + /// compiled artifacts. + cache_path: Option, } impl WasmExecutor { @@ -112,19 +116,30 @@ impl WasmExecutor { /// `method` - Method used to execute Wasm code. /// /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. - /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. + /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. + /// + /// `host_functions` - The set of host functions to be available for import provided by this + /// executor. + /// + /// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse. + /// + /// `cache_path` - A path to a directory where the executor can place its files for purposes of + /// caching. 
This may be important in cases when there are many different modules with the + /// compiled execution method is used. pub fn new( method: WasmExecutionMethod, default_heap_pages: Option, host_functions: Vec<&'static dyn Function>, max_runtime_instances: usize, + cache_path: Option, ) -> Self { WasmExecutor { method, default_heap_pages: default_heap_pages.unwrap_or(DEFAULT_HEAP_PAGES), host_functions: Arc::new(host_functions), - cache: Arc::new(RuntimeCache::new(max_runtime_instances)), + cache: Arc::new(RuntimeCache::new(max_runtime_instances, cache_path.clone())), max_runtime_instances, + cache_path, } } @@ -210,6 +225,7 @@ impl sp_core::traits::CallInWasm for WasmExecutor { &wasm_code, self.host_functions.to_vec(), allow_missing_host_functions, + self.cache_path.as_deref(), ) .map_err(|e| format!("Failed to create module: {:?}", e))?; @@ -258,15 +274,16 @@ impl NativeExecutor { default_heap_pages: Option, max_runtime_instances: usize, ) -> Self { - let mut host_functions = sp_io::SubstrateHostFunctions::host_functions(); + let mut host_functions = D::ExtendHostFunctions::host_functions(); // Add the custom host functions provided by the user. - host_functions.extend(D::ExtendHostFunctions::host_functions()); + host_functions.extend(sp_io::SubstrateHostFunctions::host_functions()); let wasm_executor = WasmExecutor::new( fallback_method, default_heap_pages, host_functions, max_runtime_instances, + None, ); NativeExecutor { diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 7288df35f31c4..4772471049703 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Traits and accessor functions for calling into the Substrate Wasm runtime. //! @@ -26,6 +28,7 @@ use codec::Decode; use sp_core::traits::{Externalities, RuntimeCode, FetchRuntimeCode}; use sp_version::RuntimeVersion; use std::panic::AssertUnwindSafe; +use std::path::{Path, PathBuf}; use sc_executor_common::wasm_runtime::{WasmModule, WasmInstance}; use sp_wasm_interface::Function; @@ -150,14 +153,22 @@ pub struct RuntimeCache { runtimes: Mutex<[Option>; MAX_RUNTIMES]>, /// The size of the instances cache for each runtime. max_runtime_instances: usize, + cache_path: Option, } impl RuntimeCache { /// Creates a new instance of a runtimes cache. 
- pub fn new(max_runtime_instances: usize) -> RuntimeCache { + /// + /// `max_runtime_instances` specifies the number of runtime instances preserved in an in-memory + /// cache. + /// + /// `cache_path` allows to specify an optional directory where the executor can store files + /// for caching. + pub fn new(max_runtime_instances: usize, cache_path: Option) -> RuntimeCache { RuntimeCache { runtimes: Default::default(), max_runtime_instances, + cache_path, } } @@ -233,6 +244,7 @@ impl RuntimeCache { host_functions.into(), allow_missing_func_imports, self.max_runtime_instances, + self.cache_path.as_deref(), ); if let Err(ref err) = result { log::warn!(target: "wasm-runtime", "Cannot create a runtime: {:?}", err); @@ -269,22 +281,32 @@ pub fn create_wasm_runtime_with_code( code: &[u8], host_functions: Vec<&'static dyn Function>, allow_missing_func_imports: bool, + cache_path: Option<&Path>, ) -> Result, WasmError> { match wasm_method { - WasmExecutionMethod::Interpreted => + WasmExecutionMethod::Interpreted => { + // Wasmi doesn't have any need in a cache directory. + // + // We drop the cache_path here to silence warnings that cache_path is not used if compiling + // without the `wasmtime` flag. + drop(cache_path); + sc_executor_wasmi::create_runtime( code, heap_pages, host_functions, - allow_missing_func_imports - ).map(|runtime| -> Arc { Arc::new(runtime) }), + allow_missing_func_imports, + ) + .map(|runtime| -> Arc { Arc::new(runtime) }) + } #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled => sc_executor_wasmtime::create_runtime( code, heap_pages, host_functions, - allow_missing_func_imports + allow_missing_func_imports, + cache_path, ).map(|runtime| -> Arc { Arc::new(runtime) }), } } @@ -317,6 +339,7 @@ fn create_versioned_wasm_runtime( host_functions: Vec<&'static dyn Function>, allow_missing_func_imports: bool, max_instances: usize, + cache_path: Option<&Path>, ) -> Result { #[cfg(not(target_os = "unknown"))] let time = std::time::Instant::now(); @@ -326,6 +349,7 @@ fn create_versioned_wasm_runtime( &code, host_functions, allow_missing_func_imports, + cache_path, )?; // Call to determine runtime version. 
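With the new trailing `cache_path` argument, constructing a `WasmExecutor` at the call sites updated in this diff looks roughly like the sketch below (the values are illustrative). The path is only consulted by the wasmtime backend; as the `create_wasm_runtime_with_code` change above notes, wasmi has no use for a cache directory. Note also that `NativeExecutor::new` now registers `D::ExtendHostFunctions` before the default `sp_io` set, which appears intended to let custom host functions shadow the built-in ones when names collide.

```rust
use sc_executor::{WasmExecutionMethod, WasmExecutor};

fn make_executor() -> WasmExecutor {
    WasmExecutor::new(
        WasmExecutionMethod::Interpreted,
        Some(1024),                                       // default heap pages
        sp_io::SubstrateHostFunctions::host_functions(),  // host functions to expose
        8,                                                // max runtime instances
        None,                                             // cache_path: only the wasmtime backend uses it
    )
}
```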
diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index bf174bca2d466..cfe9dd7108cf2 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmi" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -16,9 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" wasmi = "0.6.2" -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor-common = { version = "0.8.0", path = "../common" } -sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-executor-common = { version = "0.9.0", path = "../common" } +sp-wasm-interface = { version = "3.0.0", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "3.0.0", path = "../../../primitives/runtime-interface" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-allocator = { version = "3.0.0", path = "../../../primitives/allocator" } diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 17b92e04950c9..e6a6ef3a61039 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! This crate provides an implementation of `WasmModule` that is baked by wasmi. 
diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 7a8aa1ff458f3..051b314e4498a 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmtime" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -16,13 +16,13 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.8" scoped-tls = "1.0" parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor-common = { version = "0.8.0", path = "../common" } -sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } -wasmtime = "0.19" +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-executor-common = { version = "0.9.0", path = "../common" } +sp-wasm-interface = { version = "3.0.0", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "3.0.0", path = "../../../primitives/runtime-interface" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-allocator = { version = "3.0.0", path = "../../../primitives/allocator" } +wasmtime = "0.22" pwasm-utils = "0.14.0" [dev-dependencies] diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index eeb7cb927167f..c1eb77ff81f34 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! This module defines `HostState` and `HostContext` structs which provide logic and state //! required for execution of host. 
@@ -232,7 +234,7 @@ impl<'a> Sandbox for HostContext<'a> { .map_err(|e| e.to_string()) } - fn memory_new(&mut self, initial: u32, maximum: MemoryId) -> sp_wasm_interface::Result { + fn memory_new(&mut self, initial: u32, maximum: u32) -> sp_wasm_interface::Result { self.sandbox_store .borrow_mut() .new_memory(initial, maximum) diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index add62df5cef45..08cedd434e366 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -44,15 +44,17 @@ pub fn resolve_imports( let mut externs = vec![]; let mut memory_import_index = None; for import_ty in module.imports() { + let name = import_name(&import_ty)?; + if import_ty.module() != "env" { return Err(WasmError::Other(format!( "host doesn't provide any imports from non-env module: {}:{}", import_ty.module(), - import_ty.name() + name, ))); } - let resolved = match import_ty.name() { + let resolved = match name { "memory" => { memory_import_index = Some(externs.len()); resolve_memory_import(store, &import_ty, heap_pages)? @@ -72,6 +74,16 @@ pub fn resolve_imports( }) } +/// When the module linking proposal is supported the import's name can be `None`. +/// Because we are not using this proposal we could safely unwrap the name. +/// However, we opt for an error in order to avoid panics at all costs. +fn import_name<'a, 'b: 'a>(import: &'a ImportType<'b>) -> Result<&'a str, WasmError> { + let name = import.name().ok_or_else(|| + WasmError::Other("The module linking proposal is not supported.".to_owned()) + )?; + Ok(name) +} + fn resolve_memory_import( store: &Store, import_ty: &ImportType, @@ -83,7 +95,7 @@ fn resolve_memory_import( return Err(WasmError::Other(format!( "this import must be of memory type: {}:{}", import_ty.module(), - import_ty.name() + import_name(&import_ty)?, ))) } }; @@ -116,49 +128,46 @@ fn resolve_func_import( host_functions: &[&'static dyn Function], allow_missing_func_imports: bool, ) -> Result { + let name = import_name(&import_ty)?; + let func_ty = match import_ty.ty() { ExternType::Func(func_ty) => func_ty, _ => { return Err(WasmError::Other(format!( "host doesn't provide any non function imports besides 'memory': {}:{}", import_ty.module(), - import_ty.name() + name, ))); } }; let host_func = match host_functions .iter() - .find(|host_func| host_func.name() == import_ty.name()) + .find(|host_func| host_func.name() == name) { Some(host_func) => host_func, None if allow_missing_func_imports => { - return Ok(MissingHostFuncHandler::new(import_ty).into_extern(store, &func_ty)); + return Ok(MissingHostFuncHandler::new(import_ty)?.into_extern(store, &func_ty)); } None => { return Err(WasmError::Other(format!( "host doesn't provide such function: {}:{}", import_ty.module(), - import_ty.name() + name, ))); } }; - if !signature_matches(&func_ty, &wasmtime_func_sig(*host_func)) { + if &func_ty != &wasmtime_func_sig(*host_func) { return Err(WasmError::Other(format!( "signature mismatch for: {}:{}", import_ty.module(), - import_ty.name() + name, ))); } Ok(HostFuncHandler::new(*host_func).into_extern(store)) } -/// Returns `true` if `lhs` and `rhs` represent the same signature. 
-fn signature_matches(lhs: &wasmtime::FuncType, rhs: &wasmtime::FuncType) -> bool { - lhs.params() == rhs.params() && lhs.results() == rhs.results() -} - /// This structure implements `Callable` and acts as a bridge between wasmtime and /// substrate host functions. struct HostFuncHandler { @@ -243,11 +252,11 @@ struct MissingHostFuncHandler { } impl MissingHostFuncHandler { - fn new(import_ty: &ImportType) -> Self { - Self { + fn new(import_ty: &ImportType) -> Result { + Ok(Self { module: import_ty.module().to_string(), - name: import_ty.name().to_string(), - } + name: import_name(import_ty)?.to_string(), + }) } fn into_extern(self, store: &Store, func_ty: &FuncType) -> Extern { @@ -263,22 +272,17 @@ impl MissingHostFuncHandler { } fn wasmtime_func_sig(func: &dyn Function) -> wasmtime::FuncType { - let params = func - .signature() + let signature = func.signature(); + let params = signature .args .iter() .cloned() - .map(into_wasmtime_val_type) - .collect::>() - .into_boxed_slice(); - let results = func - .signature() + .map(into_wasmtime_val_type); + let results = signature .return_value .iter() .cloned() - .map(into_wasmtime_val_type) - .collect::>() - .into_boxed_slice(); + .map(into_wasmtime_val_type); wasmtime::FuncType::new(params, results) } diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index 089d8cb237b56..f0543a7ef9506 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -113,7 +113,7 @@ impl EntryPoint { ]) }, }) - .map(|results| + .map(|results| // the signature is checked to have i64 return type results[0].unwrap_i64() as u64 ) @@ -124,27 +124,28 @@ impl EntryPoint { } pub fn direct(func: wasmtime::Func) -> std::result::Result { - match (func.ty().params(), func.ty().results()) { - (&[wasmtime::ValType::I32, wasmtime::ValType::I32], &[wasmtime::ValType::I64]) => { - Ok(Self { func, call_type: EntryPointType::Direct }) - } - _ => { - Err("Invalid signature for direct entry point") - } + use wasmtime::ValType; + let entry_point = wasmtime::FuncType::new( + [ValType::I32, ValType::I32].iter().cloned(), + [ValType::I64].iter().cloned(), + ); + if func.ty() == entry_point { + Ok(Self { func, call_type: EntryPointType::Direct }) + } else { + Err("Invalid signature for direct entry point") } } pub fn wrapped(dispatcher: wasmtime::Func, func: u32) -> std::result::Result { - match (dispatcher.ty().params(), dispatcher.ty().results()) { - ( - &[wasmtime::ValType::I32, wasmtime::ValType::I32, wasmtime::ValType::I32], - &[wasmtime::ValType::I64], - ) => { - Ok(Self { func: dispatcher, call_type: EntryPointType::Wrapped(func) }) - }, - _ => { - Err("Invalid signature for wrapped entry point") - } + use wasmtime::ValType; + let entry_point = wasmtime::FuncType::new( + [ValType::I32, ValType::I32, ValType::I32].iter().cloned(), + [ValType::I64].iter().cloned(), + ); + if dispatcher.ty() == entry_point { + Ok(Self { func: dispatcher, call_type: EntryPointType::Wrapped(func) }) + } else { + Err("Invalid signature for wrapped entry point") } } } diff --git a/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs 
b/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs index 42935d851d95c..a6b1ed394150d 100644 --- a/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs +++ b/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/executor/wasmtime/src/lib.rs b/client/executor/wasmtime/src/lib.rs index 66e4e085235ac..db7776d4c5845 100644 --- a/client/executor/wasmtime/src/lib.rs +++ b/client/executor/wasmtime/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . ///! Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 965b067535721..64ad5a1f4e49f 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Defines the compiled Wasm runtime that uses Wasmtime internally. 
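Two wasmtime-0.22 adjustments above are worth calling out: import names are now `Option<&str>` (anticipating the module-linking proposal), which the new `import_name` helper turns into a `WasmError` instead of unwrapping, and entry-point signature checks now build an expected `wasmtime::FuncType` and compare it for equality rather than matching on parameter/result slices. A condensed sketch of the latter, assuming the 0.22 API as used in this diff:

```rust
use wasmtime::{Func, FuncType, ValType};

// Returns true when `func` has the `(i32, i32) -> i64` shape expected of a direct
// runtime entry point (data pointer and length in, packed pointer/length out).
fn is_direct_entry_point(func: &Func) -> bool {
    let expected = FuncType::new(
        [ValType::I32, ValType::I32].iter().cloned(),
        [ValType::I64].iter().cloned(),
    );
    func.ty() == expected
}
```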
@@ -23,6 +25,7 @@ use crate::state_holder; use std::rc::Rc; use std::sync::Arc; +use std::path::Path; use sc_executor_common::{ error::{Result, WasmError}, wasm_runtime::{WasmModule, WasmInstance, InvokeMethod}, @@ -117,20 +120,68 @@ impl WasmInstance for WasmtimeInstance { } } +/// Prepare a directory structure and a config file to enable wasmtime caching. +/// +/// In case of an error the caching will not be enabled. +fn setup_wasmtime_caching( + cache_path: &Path, + config: &mut Config, +) -> std::result::Result<(), String> { + use std::fs; + + let wasmtime_cache_root = cache_path.join("wasmtime"); + fs::create_dir_all(&wasmtime_cache_root) + .map_err(|err| format!("cannot create the dirs to cache: {:?}", err))?; + + // Canonicalize the path after creating the directories. + let wasmtime_cache_root = wasmtime_cache_root + .canonicalize() + .map_err(|err| format!("failed to canonicalize the path: {:?}", err))?; + + // Write the cache config file + let cache_config_path = wasmtime_cache_root.join("cache-config.toml"); + let config_content = format!( + "\ +[cache] +enabled = true +directory = \"{cache_dir}\" +", + cache_dir = wasmtime_cache_root.display() + ); + fs::write(&cache_config_path, config_content) + .map_err(|err| format!("cannot write the cache config: {:?}", err))?; + + config + .cache_config_load(cache_config_path) + .map_err(|err| format!("failed to parse the config: {:?}", err))?; + + Ok(()) +} + /// Create a new `WasmtimeRuntime` given the code. This function performs translation from Wasm to /// machine code, which can be computationally heavy. +/// +/// The `cache_path` designates where this executor implementation can put compiled artifacts. pub fn create_runtime( code: &[u8], heap_pages: u64, host_functions: Vec<&'static dyn Function>, allow_missing_func_imports: bool, + cache_path: Option<&Path>, ) -> std::result::Result { // Create the engine, store and finally the module from the given code. let mut config = Config::new(); config.cranelift_opt_level(wasmtime::OptLevel::SpeedAndSize); + if let Some(cache_path) = cache_path { + if let Err(reason) = setup_wasmtime_caching(cache_path, &mut config) { + log::warn!( + "failed to setup wasmtime cache. Performance may degrade significantly: {}.", + reason, + ); + } + } let engine = Engine::new(&config); - let module_wrapper = ModuleWrapper::new(&engine, code) .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; diff --git a/client/executor/wasmtime/src/state_holder.rs b/client/executor/wasmtime/src/state_holder.rs index 711d3bb735d7c..0e2684cd25130 100644 --- a/client/executor/wasmtime/src/state_holder.rs +++ b/client/executor/wasmtime/src/state_holder.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/executor/wasmtime/src/util.rs b/client/executor/wasmtime/src/util.rs index d2de95d4cc715..1437c6f8509bf 100644 --- a/client/executor/wasmtime/src/util.rs +++ b/client/executor/wasmtime/src/util.rs @@ -1,18 +1,20 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use std::ops::Range; diff --git a/client/finality-grandpa-warp-sync/Cargo.toml b/client/finality-grandpa-warp-sync/Cargo.toml new file mode 100644 index 0000000000000..ca3ea94f38e86 --- /dev/null +++ b/client/finality-grandpa-warp-sync/Cargo.toml @@ -0,0 +1,28 @@ +[package] +description = "A request-response protocol for handling grandpa warp sync requests" +name = "sc-finality-grandpa-warp-sync" +version = "0.8.0" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +authors = ["Parity Technologies "] +edition = "2018" +publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sc-network = { version = "0.9.0", path = "../network" } +sc-finality-grandpa = { version = "0.9.0", path = "../finality-grandpa" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sc-client-api = { version = "3.0.0", path = "../api" } +sc-service = { version = "0.9.0", path = "../service" } +futures = "0.3.8" +log = "0.4.11" +derive_more = "0.99.11" +codec = { package = "parity-scale-codec", version = "2.0.0" } +prost = "0.7" +num-traits = "0.2.14" +parking_lot = "0.11.1" diff --git a/client/finality-grandpa-warp-sync/src/lib.rs b/client/finality-grandpa-warp-sync/src/lib.rs new file mode 100644 index 0000000000000..f7ce59b1c168f --- /dev/null +++ b/client/finality-grandpa-warp-sync/src/lib.rs @@ -0,0 +1,163 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Helper for handling (i.e. answering) grandpa warp sync requests from a remote peer via the +//! [`crate::request_responses::RequestResponsesBehaviour`]. 
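The handler introduced below follows a plain request/response pattern: the networking side owns an mpsc sender (the `inbound_queue` of the `RequestResponseConfig`), while the handler task drains the receiving end and answers every request through a `oneshot` channel. What follows is a minimal, self-contained sketch of that pattern using only the `futures` crate; the types and the echo payload are simplified stand-ins, not the real warp sync protocol.

use futures::channel::{mpsc, oneshot};
use futures::StreamExt;

// Simplified stand-in for sc_network's IncomingRequest: an opaque payload plus
// a one-shot channel on which exactly one response must be sent back.
struct IncomingRequest {
    payload: Vec<u8>,
    pending_response: oneshot::Sender<Vec<u8>>,
}

// Drain the inbound queue and answer each request. A real handler would decode
// the payload (the `begin` block hash) and build a warp sync proof; this sketch
// simply echoes the payload back.
async fn run_handler(mut requests: mpsc::Receiver<IncomingRequest>) {
    while let Some(IncomingRequest { payload, pending_response }) = requests.next().await {
        let _ = pending_response.send(payload);
    }
}

fn main() {
    futures::executor::block_on(async {
        let (mut inbound_queue, requests) = mpsc::channel(8);

        let client = async move {
            let (tx, rx) = oneshot::channel();
            inbound_queue
                .try_send(IncomingRequest { payload: b"begin".to_vec(), pending_response: tx })
                .expect("queue has free capacity");
            // Dropping the sender closes the queue so the handler loop terminates.
            drop(inbound_queue);
            assert_eq!(rx.await.expect("handler replied"), b"begin".to_vec());
        };

        futures::join!(run_handler(requests), client);
    });
}

The real crate wires the same pieces together in `GrandpaWarpSyncRequestHandler::new`, which hands the mpsc sender to the network as the protocol's inbound queue and keeps the receiver for its own `run` loop.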
+ +use codec::Decode; +use sc_network::config::{IncomingRequest, OutgoingResponse, ProtocolId, RequestResponseConfig}; +use sc_client_api::Backend; +use sp_runtime::traits::NumberFor; +use futures::channel::{mpsc, oneshot}; +use futures::stream::StreamExt; +use log::debug; +use sp_runtime::traits::Block as BlockT; +use std::time::Duration; +use std::sync::Arc; +use sc_service::{SpawnTaskHandle, config::{Configuration, Role}}; +use sc_finality_grandpa::WarpSyncFragmentCache; + +/// Generates the appropriate [`RequestResponseConfig`] for a given chain configuration. +pub fn request_response_config_for_chain + 'static>( + config: &Configuration, + spawn_handle: SpawnTaskHandle, + backend: Arc, +) -> RequestResponseConfig + where NumberFor: sc_finality_grandpa::BlockNumberOps, +{ + let protocol_id = config.protocol_id(); + + if matches!(config.role, Role::Light) { + // Allow outgoing requests but deny incoming requests. + generate_request_response_config(protocol_id.clone()) + } else { + // Allow both outgoing and incoming requests. + let (handler, request_response_config) = GrandpaWarpSyncRequestHandler::new( + protocol_id.clone(), + backend.clone(), + ); + spawn_handle.spawn("grandpa_warp_sync_request_handler", handler.run()); + request_response_config + } +} + +const LOG_TARGET: &str = "finality-grandpa-warp-sync-request-handler"; + +/// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing incoming requests. +pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestResponseConfig { + RequestResponseConfig { + name: generate_protocol_name(protocol_id).into(), + max_request_size: 32, + max_response_size: 16 * 1024 * 1024, + request_timeout: Duration::from_secs(10), + inbound_queue: None, + } +} + +/// Generate the grandpa warp sync protocol name from chain specific protocol identifier. +fn generate_protocol_name(protocol_id: ProtocolId) -> String { + let mut s = String::new(); + s.push_str("/"); + s.push_str(protocol_id.as_ref()); + s.push_str("/sync/warp"); + s +} + +#[derive(codec::Decode)] +struct Request { + begin: B::Hash +} + +/// Setting a large fragment limit, allowing client +/// to define it is possible. +const WARP_SYNC_FRAGMENTS_LIMIT: usize = 100; + +/// Number of item with justification in warp sync cache. +/// This should be customizable, but setting it to the max number of fragments +/// we return seems like a good idea until then. +const WARP_SYNC_CACHE_SIZE: usize = WARP_SYNC_FRAGMENTS_LIMIT; + +/// Handler for incoming grandpa warp sync requests from a remote peer. +pub struct GrandpaWarpSyncRequestHandler { + backend: Arc, + cache: Arc>>, + request_receiver: mpsc::Receiver, + _phantom: std::marker::PhantomData +} + +impl> GrandpaWarpSyncRequestHandler { + /// Create a new [`GrandpaWarpSyncRequestHandler`]. 
+ pub fn new(protocol_id: ProtocolId, backend: Arc) -> (Self, RequestResponseConfig) { + let (tx, request_receiver) = mpsc::channel(20); + + let mut request_response_config = generate_request_response_config(protocol_id); + request_response_config.inbound_queue = Some(tx); + let cache = Arc::new(parking_lot::RwLock::new(WarpSyncFragmentCache::new(WARP_SYNC_CACHE_SIZE))); + + (Self { backend, request_receiver, cache, _phantom: std::marker::PhantomData }, request_response_config) + } + + fn handle_request( + &self, + payload: Vec, + pending_response: oneshot::Sender + ) -> Result<(), HandleRequestError> + where NumberFor: sc_finality_grandpa::BlockNumberOps, + { + let request = Request::::decode(&mut &payload[..])?; + + let mut cache = self.cache.write(); + let response = sc_finality_grandpa::prove_warp_sync( + self.backend.blockchain(), request.begin, Some(WARP_SYNC_FRAGMENTS_LIMIT), Some(&mut cache) + )?; + + pending_response.send(OutgoingResponse { + result: Ok(response), + reputation_changes: Vec::new(), + }).map_err(|_| HandleRequestError::SendResponse) + } + + /// Run [`GrandpaWarpSyncRequestHandler`]. + pub async fn run(mut self) + where NumberFor: sc_finality_grandpa::BlockNumberOps, + { + while let Some(request) = self.request_receiver.next().await { + let IncomingRequest { peer, payload, pending_response } = request; + + match self.handle_request(payload, pending_response) { + Ok(()) => debug!(target: LOG_TARGET, "Handled grandpa warp sync request from {}.", peer), + Err(e) => debug!( + target: LOG_TARGET, + "Failed to handle grandpa warp sync request from {}: {}", + peer, e, + ), + } + } + } +} + +#[derive(derive_more::Display, derive_more::From)] +enum HandleRequestError { + #[display(fmt = "Failed to decode request: {}.", _0)] + DecodeProto(prost::DecodeError), + #[display(fmt = "Failed to encode response: {}.", _0)] + EncodeProto(prost::EncodeError), + #[display(fmt = "Failed to decode block hash: {}.", _0)] + DecodeScale(codec::Error), + Client(sp_blockchain::Error), + #[display(fmt = "Failed to send response.")] + SendResponse, +} diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 8966f5e8f657a..38f6acda05483 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-finality-grandpa" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -16,46 +16,47 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -fork-tree = { version = "2.0.0", path = "../../utils/fork-tree" } -futures = "0.3.4" +fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" } +futures = "0.3.9" futures-timer = "3.0.1" log = "0.4.8" -parking_lot = "0.10.0" +parking_lot = "0.11.1" rand = "0.7.2" -parity-scale-codec = { version = "1.3.4", features = ["derive"] } -sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } -sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0", path = "../../client/consensus/common" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } -sp-api = { 
version = "2.0.0", path = "../../primitives/api" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sc-keystore = { version = "2.0.0", path = "../keystore" } +parity-scale-codec = { version = "2.0.0", features = ["derive"] } +sp-application-crypto = { version = "3.0.0", path = "../../primitives/application-crypto" } +sp-arithmetic = { version = "3.0.0", path = "../../primitives/arithmetic" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "3.0.0", path = "../../primitives/utils" } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.9.0", path = "../consensus/common" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-keystore = { version = "0.9.0", path = "../../primitives/keystore" } +sp-api = { version = "3.0.0", path = "../../primitives/api" } +sc-telemetry = { version = "3.0.0", path = "../telemetry" } +sc-keystore = { version = "3.0.0", path = "../keystore" } serde_json = "1.0.41" -sc-client-api = { version = "2.0.0", path = "../api" } -sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0", path = "../network" } -sc-network-gossip = { version = "0.8.0", path = "../network-gossip" } -sp-finality-grandpa = { version = "2.0.0", path = "../../primitives/finality-grandpa" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} -sc-block-builder = { version = "0.8.0", path = "../block-builder" } -finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } -pin-project = "0.4.6" +sc-client-api = { version = "3.0.0", path = "../api" } +sp-inherents = { version = "3.0.0", path = "../../primitives/inherents" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sc-network = { version = "0.9.0", path = "../network" } +sc-network-gossip = { version = "0.9.0", path = "../network-gossip" } +sp-finality-grandpa = { version = "3.0.0", path = "../../primitives/finality-grandpa" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} +sc-block-builder = { version = "0.9.0", path = "../block-builder" } +finality-grandpa = { version = "0.13.0", features = ["derive-codec"] } +pin-project = "1.0.4" +linked-hash-map = "0.5.2" [dev-dependencies] assert_matches = "1.3.0" -finality-grandpa = { version = "0.12.3", features = ["derive-codec", "test-helpers"] } -sc-network = { version = "0.8.0", path = "../network" } +finality-grandpa = { version = "0.13.0", features = ["derive-codec", "test-helpers"] } +sc-network = { version = "0.9.0", path = "../network" } sc-network-test = { version = "0.8.0", path = "../network/test" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } +sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8.0", path = "../../primitives/consensus/babe" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +sp-consensus-babe = { version = "0.9.0", path = "../../primitives/consensus/babe" } +sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } +sp-tracing = { version = 
"3.0.0", path = "../../primitives/tracing" } tokio = { version = "0.2", features = ["rt-core"] } tempfile = "3.1.0" -sp-api = { version = "2.0.0", path = "../../primitives/api" } +sp-api = { version = "3.0.0", path = "../../primitives/api" } diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index c0c2ea8b27d88..58aa78a38b103 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-finality-grandpa-rpc" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "RPC extensions for the GRANDPA finality gadget" repository = "https://github.com/paritytech/substrate/" @@ -9,31 +9,31 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" readme = "README.md" [dependencies] -sc-finality-grandpa = { version = "0.8.0", path = "../" } -sc-rpc = { version = "2.0.0", path = "../../rpc" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" -jsonrpc-pubsub = "15.0.0" +sc-finality-grandpa = { version = "0.9.0", path = "../" } +sc-rpc = { version = "3.0.0", path = "../../rpc" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +finality-grandpa = { version = "0.13.0", features = ["derive-codec"] } +jsonrpc-core = "15.1.0" +jsonrpc-core-client = "15.1.0" +jsonrpc-derive = "15.1.0" +jsonrpc-pubsub = "15.1.0" futures = { version = "0.3.4", features = ["compat"] } serde = { version = "1.0.105", features = ["derive"] } serde_json = "1.0.50" log = "0.4.8" derive_more = "0.99.2" -parity-scale-codec = { version = "1.3.0", features = ["derive"] } -sc-client-api = { version = "2.0.0", path = "../../api" } +parity-scale-codec = { version = "2.0.0", features = ["derive"] } +sc-client-api = { version = "3.0.0", path = "../../api" } [dev-dependencies] -sc-block-builder = { version = "0.8.0", path = "../../block-builder" } +sc-block-builder = { version = "0.9.0", path = "../../block-builder" } sc-network-test = { version = "0.8.0", path = "../../network/test" } -sc-rpc = { version = "2.0.0", path = "../../rpc", features = ["test-helpers"] } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-finality-grandpa = { version = "2.0.0", path = "../../../primitives/finality-grandpa" } -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } +sc-rpc = { version = "3.0.0", path = "../../rpc", features = ["test-helpers"] } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-finality-grandpa = { version = "3.0.0", path = "../../../primitives/finality-grandpa" } +sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } lazy_static = "1.4" diff --git a/client/finality-grandpa/rpc/src/error.rs b/client/finality-grandpa/rpc/src/error.rs index 
6464acbe10ea0..c812b78f3fd8e 100644 --- a/client/finality-grandpa/rpc/src/error.rs +++ b/client/finality-grandpa/rpc/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -30,7 +30,7 @@ pub enum Error { VoterStateReportsUnreasonablyLargeNumbers, /// GRANDPA prove finality failed. #[display(fmt = "GRANDPA prove finality rpc failed: {}", _0)] - ProveFinalityFailed(sp_blockchain::Error), + ProveFinalityFailed(sc_finality_grandpa::FinalityProofError), } /// The error codes returned by jsonrpc. diff --git a/client/finality-grandpa/rpc/src/finality.rs b/client/finality-grandpa/rpc/src/finality.rs index 1f288b86a0e46..cfd8f68e5ce60 100644 --- a/client/finality-grandpa/rpc/src/finality.rs +++ b/client/finality-grandpa/rpc/src/finality.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,18 +22,16 @@ use sc_finality_grandpa::FinalityProofProvider; use sp_runtime::traits::{Block as BlockT, NumberFor}; #[derive(Serialize, Deserialize)] -pub struct EncodedFinalityProofs(pub sp_core::Bytes); +pub struct EncodedFinalityProof(pub sp_core::Bytes); /// Local trait mainly to allow mocking in tests. pub trait RpcFinalityProofProvider { - /// Return finality proofs for the given authorities set id, if it is provided, otherwise the - /// current one will be used. + /// Prove finality for the given block number by returning a Justification for the last block of + /// the authority set. fn rpc_prove_finality( &self, - begin: Block::Hash, - end: Block::Hash, - authorities_set_id: u64, - ) -> Result, sp_blockchain::Error>; + block: NumberFor, + ) -> Result, sc_finality_grandpa::FinalityProofError>; } impl RpcFinalityProofProvider for FinalityProofProvider @@ -44,11 +42,9 @@ where { fn rpc_prove_finality( &self, - begin: Block::Hash, - end: Block::Hash, - authorities_set_id: u64, - ) -> Result, sp_blockchain::Error> { - self.prove_finality(begin, end, authorities_set_id) - .map(|x| x.map(|y| EncodedFinalityProofs(y.into()))) + block: NumberFor, + ) -> Result, sc_finality_grandpa::FinalityProofError> { + self.prove_finality(block) + .map(|x| x.map(|y| EncodedFinalityProof(y.into()))) } } diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index 172473ad6518b..204bea4c18e2c 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -37,9 +37,9 @@ mod notification; mod report; use sc_finality_grandpa::GrandpaJustificationStream; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::traits::{Block as BlockT, NumberFor}; -use finality::{EncodedFinalityProofs, RpcFinalityProofProvider}; +use finality::{EncodedFinalityProof, RpcFinalityProofProvider}; use report::{ReportAuthoritySet, ReportVoterState, ReportedRoundStates}; use notification::JustificationNotification; @@ -48,7 +48,7 @@ type FutureResult = /// Provides RPC methods for interacting with GRANDPA. #[rpc] -pub trait GrandpaApi { +pub trait GrandpaApi { /// RPC Metadata type Metadata; @@ -82,15 +82,13 @@ pub trait GrandpaApi { id: SubscriptionId ) -> jsonrpc_core::Result; - /// Prove finality for the range (begin; end] hash. Returns None if there are no finalized blocks - /// unknown in the range. If no authorities set is provided, the current one will be attempted. + /// Prove finality for the given block number by returning the Justification for the last block + /// in the set and all the intermediary headers to link them together. #[rpc(name = "grandpa_proveFinality")] fn prove_finality( &self, - begin: Hash, - end: Hash, - authorities_set_id: Option, - ) -> FutureResult>; + block: Number, + ) -> FutureResult>; } /// Implements the GrandpaApi RPC trait for interacting with GRANDPA. @@ -127,7 +125,8 @@ impl } } -impl GrandpaApi +impl + GrandpaApi> for GrandpaRpcHandler where VoterState: ReportVoterState + Send + Sync + 'static, @@ -171,16 +170,9 @@ where fn prove_finality( &self, - begin: Block::Hash, - end: Block::Hash, - authorities_set_id: Option, - ) -> FutureResult> { - // If we are not provided a set_id, try with the current one. 
- let authorities_set_id = authorities_set_id - .unwrap_or_else(|| self.authority_set.get().0); - let result = self - .finality_proof_provider - .rpc_prove_finality(begin, end, authorities_set_id); + block: NumberFor, + ) -> FutureResult> { + let result = self.finality_proof_provider.rpc_prove_finality(block); let future = async move { result }.boxed(); Box::new( future @@ -204,7 +196,7 @@ mod tests { use sc_block_builder::BlockBuilder; use sc_finality_grandpa::{ report, AuthorityId, GrandpaJustificationSender, GrandpaJustification, - FinalityProofFragment, + FinalityProof, }; use sp_blockchain::HeaderBackend; use sp_consensus::RecordProof; @@ -223,7 +215,7 @@ mod tests { struct EmptyVoterState; struct TestFinalityProofProvider { - finality_proofs: Vec>, + finality_proof: Option>, } fn voters() -> HashSet { @@ -262,11 +254,15 @@ mod tests { impl RpcFinalityProofProvider for TestFinalityProofProvider { fn rpc_prove_finality( &self, - _begin: Block::Hash, - _end: Block::Hash, - _authoritites_set_id: u64, - ) -> Result, sp_blockchain::Error> { - Ok(Some(EncodedFinalityProofs(self.finality_proofs.encode().into()))) + _block: NumberFor + ) -> Result, sc_finality_grandpa::FinalityProofError> { + Ok(Some(EncodedFinalityProof( + self.finality_proof + .as_ref() + .expect("Don't call rpc_prove_finality without setting the FinalityProof") + .encode() + .into() + ))) } } @@ -308,12 +304,12 @@ mod tests { ) where VoterState: ReportVoterState + Send + Sync + 'static, { - setup_io_handler_with_finality_proofs(voter_state, Default::default()) + setup_io_handler_with_finality_proofs(voter_state, None) } fn setup_io_handler_with_finality_proofs( voter_state: VoterState, - finality_proofs: Vec>, + finality_proof: Option>, ) -> ( jsonrpc_core::MetaIoHandler, GrandpaJustificationSender, @@ -321,7 +317,7 @@ mod tests { VoterState: ReportVoterState + Send + Sync + 'static, { let (justification_sender, justification_stream) = GrandpaJustificationStream::channel(); - let finality_proof_provider = Arc::new(TestFinalityProofProvider { finality_proofs }); + let finality_proof_provider = Arc::new(TestFinalityProofProvider { finality_proof }); let handler = GrandpaRpcHandler::new( TestAuthoritySet, @@ -520,29 +516,24 @@ mod tests { #[test] fn prove_finality_with_test_finality_proof_provider() { - let finality_proofs = vec![FinalityProofFragment { + let finality_proof = FinalityProof { block: header(42).hash(), justification: create_justification().encode(), unknown_headers: vec![header(2)], - authorities_proof: None, - }]; + }; let (io, _) = setup_io_handler_with_finality_proofs( TestVoterState, - finality_proofs.clone(), + Some(finality_proof.clone()), ); - let request = "{\"jsonrpc\":\"2.0\",\"method\":\"grandpa_proveFinality\",\"params\":[\ - \"0x0000000000000000000000000000000000000000000000000000000000000000\",\ - \"0x0000000000000000000000000000000000000000000000000000000000000001\",\ - 42\ - ],\"id\":1}"; + let request = + "{\"jsonrpc\":\"2.0\",\"method\":\"grandpa_proveFinality\",\"params\":[42],\"id\":1}"; let meta = sc_rpc::Metadata::default(); let resp = io.handle_request_sync(request, meta); let mut resp: serde_json::Value = serde_json::from_str(&resp.unwrap()).unwrap(); let result: sp_core::Bytes = serde_json::from_value(resp["result"].take()).unwrap(); - let fragments: Vec> = - Decode::decode(&mut &result[..]).unwrap(); - assert_eq!(fragments, finality_proofs); + let finality_proof_rpc: FinalityProof
= Decode::decode(&mut &result[..]).unwrap(); + assert_eq!(finality_proof_rpc, finality_proof); } } diff --git a/client/finality-grandpa/rpc/src/notification.rs b/client/finality-grandpa/rpc/src/notification.rs index fd03a622b2196..4c9141be3631a 100644 --- a/client/finality-grandpa/rpc/src/notification.rs +++ b/client/finality-grandpa/rpc/src/notification.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/finality-grandpa/rpc/src/report.rs b/client/finality-grandpa/rpc/src/report.rs index a635728cb938a..0482d90f58f0a 100644 --- a/client/finality-grandpa/rpc/src/report.rs +++ b/client/finality-grandpa/rpc/src/report.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index 2de169fc8285a..067f6dfc1ae65 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -114,6 +114,11 @@ where N: Add + Ord + Clone + Debug, pub fn clone_inner(&self) -> AuthoritySet { self.inner.read().clone() } + + /// Clone the inner `AuthoritySetChanges`. + pub fn authority_set_changes(&self) -> AuthoritySetChanges { + self.inner.read().authority_set_changes.clone() + } } impl From> for SharedAuthoritySet { @@ -152,12 +157,16 @@ pub struct AuthoritySet { /// is lower than the last finalized block (as signaled in the forced /// change) must be applied beforehand. pending_forced_changes: Vec>, + /// Track at which blocks the set id changed. This is useful when we need to prove finality for a + /// given block since we can figure out what set the block belongs to and when the set + /// started/ended. 
+ authority_set_changes: AuthoritySetChanges, } impl AuthoritySet where H: PartialEq, - N: Ord, + N: Ord + Clone, { // authority sets must be non-empty and all weights must be greater than 0 fn invalid_authority_list(authorities: &AuthorityList) -> bool { @@ -175,6 +184,7 @@ where set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }) } @@ -184,6 +194,7 @@ where set_id: u64, pending_standard_changes: ForkTree>, pending_forced_changes: Vec>, + authority_set_changes: AuthoritySetChanges, ) -> Option { if Self::invalid_authority_list(&authorities) { return None; @@ -194,6 +205,7 @@ where set_id, pending_standard_changes, pending_forced_changes, + authority_set_changes, }) } @@ -454,6 +466,9 @@ where "block" => ?change.canon_height ); + let mut authority_set_changes = self.authority_set_changes.clone(); + authority_set_changes.append(self.set_id, median_last_finalized.clone()); + new_set = Some(( median_last_finalized, AuthoritySet { @@ -461,6 +476,7 @@ where set_id: self.set_id + 1, pending_standard_changes: ForkTree::new(), // new set, new changes. pending_forced_changes: Vec::new(), + authority_set_changes, }, )); @@ -532,6 +548,9 @@ where "block" => ?change.canon_height ); + // Store the set_id together with the last block_number for the set + self.authority_set_changes.append(self.set_id, finalized_number.clone()); + self.current_authorities = change.next_authorities; self.set_id += 1; @@ -631,6 +650,45 @@ impl + Clone> PendingChange { } } +// Tracks historical authority set changes. We store the block numbers for the first block of each +// authority set, once they have been finalized. +#[derive(Debug, Encode, Decode, Clone, PartialEq)] +pub struct AuthoritySetChanges(pub Vec<(u64, N)>); + +impl AuthoritySetChanges { + pub(crate) fn empty() -> Self { + Self(Default::default()) + } + + pub(crate) fn append(&mut self, set_id: u64, block_number: N) { + self.0.push((set_id, block_number)); + } + + pub(crate) fn get_set_id(&self, block_number: N) -> Option<(u64, N)> { + let idx = self.0 + .binary_search_by_key(&block_number, |(_, n)| n.clone()) + .unwrap_or_else(|b| b); + if idx < self.0.len() { + let (set_id, block_number) = self.0[idx].clone(); + // To make sure we have the right set we need to check that the one before it also exists. + if idx > 0 { + let (prev_set_id, _) = self.0[idx - 1usize]; + if set_id != prev_set_id + 1u64 { + // Without the preceding set_id we don't have a well-defined start. + return None; + } + } else if set_id != 0 { + // If this is the first index, yet not the first set id then it's not well-defined + // that we are in the right set id. 
+ return None; + } + Some((set_id, block_number)) + } else { + None + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -657,6 +715,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let change = |height| { @@ -704,6 +763,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let change_a = PendingChange { @@ -758,9 +818,10 @@ mod tests { authorities.add_pending_change(change_d.clone(), &static_is_descendent_of(false)).unwrap(); authorities.add_pending_change(change_e.clone(), &static_is_descendent_of(false)).unwrap(); + // ordered by subtree depth assert_eq!( authorities.pending_changes().collect::>(), - vec![&change_b, &change_a, &change_c, &change_e, &change_d], + vec![&change_a, &change_c, &change_b, &change_e, &change_d], ); } @@ -771,6 +832,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; @@ -798,7 +860,7 @@ mod tests { assert_eq!( authorities.pending_changes().collect::>(), - vec![&change_b, &change_a], + vec![&change_a, &change_b], ); // finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out "hash_b" @@ -819,6 +881,7 @@ mod tests { authorities.pending_changes().collect::>(), vec![&change_a], ); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); // finalizing "hash_d" will enact the change signaled at "hash_a" let status = authorities.apply_standard_changes( @@ -837,6 +900,7 @@ mod tests { assert_eq!(authorities.current_authorities, set_a); assert_eq!(authorities.set_id, 1); assert_eq!(authorities.pending_changes().count(), 0); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); } #[test] @@ -846,6 +910,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; @@ -888,6 +953,7 @@ mod tests { authorities.apply_standard_changes("hash_d", 40, &is_descendent_of, false), Err(Error::ForkTree(fork_tree::Error::UnfinalizedAncestor)) )); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); let status = authorities.apply_standard_changes( "hash_b", @@ -901,6 +967,7 @@ mod tests { assert_eq!(authorities.current_authorities, set_a); assert_eq!(authorities.set_id, 1); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); // after finalizing `change_a` it should be possible to finalize `change_c` let status = authorities.apply_standard_changes( @@ -915,6 +982,7 @@ mod tests { assert_eq!(authorities.current_authorities, set_c); assert_eq!(authorities.set_id, 2); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15), (1, 40)])); } #[test] @@ -924,6 +992,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; @@ -990,6 +1059,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let set_a = 
vec![(AuthorityId::from_slice(&[1; 32]), 5)]; @@ -1073,6 +1143,7 @@ mod tests { set_id: 1, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges(vec![(0, 42)]), }, ) ); @@ -1086,6 +1157,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; @@ -1124,6 +1196,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; // effective at #15 @@ -1178,22 +1251,26 @@ mod tests { authorities.apply_forced_changes("hash_d45", 45, &static_is_descendent_of(true), false), Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied(15)) )); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); // we apply the first pending standard change at #15 authorities .apply_standard_changes("hash_a15", 15, &static_is_descendent_of(true), false) .unwrap(); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); // but the forced change still depends on the next standard change assert!(matches!( authorities.apply_forced_changes("hash_d", 45, &static_is_descendent_of(true), false), Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied(20)) )); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); // we apply the pending standard change at #20 authorities .apply_standard_changes("hash_b", 20, &static_is_descendent_of(true), false) .unwrap(); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15), (1, 20)])); // afterwards the forced change at #45 can already be applied since it signals // that finality stalled at #31, and the next pending standard change is effective @@ -1210,9 +1287,11 @@ mod tests { set_id: 3, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges(vec![(0, 15), (1, 20), (2, 31)]), } ), ); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15), (1, 20)])); } #[test] @@ -1224,6 +1303,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let new_set = current_authorities.clone(); @@ -1342,7 +1422,13 @@ mod tests { // empty authority lists are invalid assert_eq!(AuthoritySet::<(), ()>::genesis(vec![]), None); assert_eq!( - AuthoritySet::<(), ()>::new(vec![], 0, ForkTree::new(), Vec::new()), + AuthoritySet::<(), ()>::new( + vec![], + 0, + ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ), None, ); @@ -1361,7 +1447,8 @@ mod tests { invalid_authorities_weight.clone(), 0, ForkTree::new(), - Vec::new() + Vec::new(), + AuthoritySetChanges::empty(), ), None, ); @@ -1416,6 +1503,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let new_set = current_authorities.clone(); @@ -1511,4 +1599,32 @@ mod tests { "D" ); } + + #[test] + fn authority_set_changes_for_complete_data() { + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 41); + authority_set_changes.append(1, 81); + authority_set_changes.append(2, 121); + + assert_eq!(authority_set_changes.get_set_id(20), Some((0, 41))); + 
assert_eq!(authority_set_changes.get_set_id(40), Some((0, 41))); + assert_eq!(authority_set_changes.get_set_id(41), Some((0, 41))); + assert_eq!(authority_set_changes.get_set_id(42), Some((1, 81))); + assert_eq!(authority_set_changes.get_set_id(141), None); + } + + #[test] + fn authority_set_changes_for_incomplete_data() { + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(2, 41); + authority_set_changes.append(3, 81); + authority_set_changes.append(4, 121); + + assert_eq!(authority_set_changes.get_set_id(20), None); + assert_eq!(authority_set_changes.get_set_id(40), None); + assert_eq!(authority_set_changes.get_set_id(41), None); + assert_eq!(authority_set_changes.get_set_id(42), Some((3, 81))); + assert_eq!(authority_set_changes.get_set_id(141), None); + } } diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 4ed96d058ac6b..1ce3c7999f24c 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -1,23 +1,24 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Schema for stuff in the aux-db. use std::fmt::Debug; -use std::sync::Arc; use parity_scale_codec::{Encode, Decode}; use sc_client_api::backend::AuxStore; use sp_blockchain::{Result as ClientResult, Error as ClientError}; @@ -27,8 +28,9 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; use log::{info, warn}; use sp_finality_grandpa::{AuthorityList, SetId, RoundNumber}; -use crate::authorities::{AuthoritySet, SharedAuthoritySet, PendingChange, DelayKind}; -use crate::consensus_changes::{SharedConsensusChanges, ConsensusChanges}; +use crate::authorities::{ + AuthoritySet, AuthoritySetChanges, SharedAuthoritySet, PendingChange, DelayKind, +}; use crate::environment::{ CompletedRound, CompletedRounds, CurrentRounds, HasVoted, SharedVoterSetState, VoterSetState, }; @@ -38,9 +40,8 @@ const VERSION_KEY: &[u8] = b"grandpa_schema_version"; const SET_STATE_KEY: &[u8] = b"grandpa_completed_round"; const CONCLUDED_ROUNDS: &[u8] = b"grandpa_concluded_rounds"; const AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; -const CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; -const CURRENT_VERSION: u32 = 2; +const CURRENT_VERSION: u32 = 3; /// The voter set state. 
#[derive(Debug, Clone, Encode, Decode)] @@ -70,8 +71,9 @@ struct V0AuthoritySet { } impl Into> for V0AuthoritySet -where H: Clone + Debug + PartialEq, - N: Clone + Debug + Ord, +where + H: Clone + Debug + PartialEq, + N: Clone + Debug + Ord, { fn into(self) -> AuthoritySet { let mut pending_standard_changes = ForkTree::new(); @@ -102,19 +104,46 @@ where H: Clone + Debug + PartialEq, self.set_id, pending_standard_changes, Vec::new(), + AuthoritySetChanges::empty(), ); authority_set.expect("current_authorities is non-empty and weights are non-zero; qed.") } } -pub(crate) fn load_decode(backend: &B, key: &[u8]) -> ClientResult> { +impl Into> for V2AuthoritySet +where + H: Clone + Debug + PartialEq, + N: Clone + Debug + Ord, +{ + fn into(self) -> AuthoritySet { + AuthoritySet::new( + self.current_authorities, + self.set_id, + self.pending_standard_changes, + self.pending_forced_changes, + AuthoritySetChanges::empty(), + ) + .expect("current_authorities is non-empty and weights are non-zero; qed.") + } +} + +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +struct V2AuthoritySet { + current_authorities: AuthorityList, + set_id: u64, + pending_standard_changes: ForkTree>, + pending_forced_changes: Vec>, +} + +pub(crate) fn load_decode( + backend: &B, + key: &[u8] +) -> ClientResult> { match backend.get_aux(key)? { None => Ok(None), Some(t) => T::decode(&mut &t[..]) - .map_err( - |e| ClientError::Backend(format!("GRANDPA DB is corrupted: {}", e.what())), - ) + .map_err(|e| ClientError::Backend(format!("GRANDPA DB is corrupted: {}", e))) .map(Some) } } @@ -122,18 +151,21 @@ pub(crate) fn load_decode(backend: &B, key: &[u8]) -> Cl /// Persistent data kept between runs. pub(crate) struct PersistentData { pub(crate) authority_set: SharedAuthoritySet>, - pub(crate) consensus_changes: SharedConsensusChanges>, pub(crate) set_state: SharedVoterSetState, } fn migrate_from_version0( backend: &B, genesis_round: &G, -) -> ClientResult>, - VoterSetState, -)>> where B: AuxStore, - G: Fn() -> RoundState>, +) -> ClientResult< + Option<( + AuthoritySet>, + VoterSetState, + )>, +> +where + B: AuxStore, + G: Fn() -> RoundState>, { CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]) @@ -146,18 +178,20 @@ fn migrate_from_version0( let new_set: AuthoritySet> = old_set.into(); backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; - let (last_round_number, last_round_state) = match load_decode::<_, V0VoterSetState>>( - backend, - SET_STATE_KEY, - )? { + let (last_round_number, last_round_state) = match load_decode::< + _, + V0VoterSetState>, + >(backend, SET_STATE_KEY)? + { Some((number, state)) => (number, state), None => (0, genesis_round()), }; let set_id = new_set.set_id; - let base = last_round_state.prevote_ghost - .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); + let base = last_round_state.prevote_ghost.expect( + "state is for completed round; completed rounds must have a prevote ghost; qed." 
+ ); let mut current_rounds = CurrentRounds::new(); current_rounds.insert(last_round_number + 1, HasVoted::No); @@ -187,11 +221,15 @@ fn migrate_from_version0( fn migrate_from_version1( backend: &B, genesis_round: &G, -) -> ClientResult>, - VoterSetState, -)>> where B: AuxStore, - G: Fn() -> RoundState>, +) -> ClientResult< + Option<( + AuthoritySet>, + VoterSetState, + )>, +> +where + B: AuxStore, + G: Fn() -> RoundState>, { CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]) @@ -259,44 +297,101 @@ fn migrate_from_version1( Ok(None) } +fn migrate_from_version2( + backend: &B, + genesis_round: &G, +) -> ClientResult< + Option<( + AuthoritySet>, + VoterSetState, + )>, +> +where + B: AuxStore, + G: Fn() -> RoundState>, +{ + CURRENT_VERSION.using_encoded(|s| + backend.insert_aux(&[(VERSION_KEY, s)], &[]) + )?; + + if let Some(old_set) = load_decode::<_, V2AuthoritySet>>( + backend, + AUTHORITY_SET_KEY, + )? { + let new_set: AuthoritySet> = old_set.into(); + backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; + + let set_state = match load_decode::<_, VoterSetState>( + backend, + SET_STATE_KEY, + )? { + Some(state) => state, + None => { + let state = genesis_round(); + let base = state.prevote_ghost + .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); + + VoterSetState::live( + new_set.set_id, + &new_set, + base, + ) + } + }; + + return Ok(Some((new_set, set_state))); + } + + Ok(None) +} + /// Load or initialize persistent data from backend. pub(crate) fn load_persistent( backend: &B, genesis_hash: Block::Hash, genesis_number: NumberFor, genesis_authorities: G, -) - -> ClientResult> - where - B: AuxStore, - G: FnOnce() -> ClientResult, +) -> ClientResult> +where + B: AuxStore, + G: FnOnce() -> ClientResult, { let version: Option = load_decode(backend, VERSION_KEY)?; - let consensus_changes = load_decode(backend, CONSENSUS_CHANGES_KEY)? - .unwrap_or_else(ConsensusChanges::>::empty); let make_genesis_round = move || RoundState::genesis((genesis_hash, genesis_number)); match version { None => { - if let Some((new_set, set_state)) = migrate_from_version0::(backend, &make_genesis_round)? { + if let Some((new_set, set_state)) = + migrate_from_version0::(backend, &make_genesis_round)? + { return Ok(PersistentData { authority_set: new_set.into(), - consensus_changes: Arc::new(consensus_changes.into()), set_state: set_state.into(), }); } }, Some(1) => { - if let Some((new_set, set_state)) = migrate_from_version1::(backend, &make_genesis_round)? { + if let Some((new_set, set_state)) = + migrate_from_version1::(backend, &make_genesis_round)? + { return Ok(PersistentData { authority_set: new_set.into(), - consensus_changes: Arc::new(consensus_changes.into()), set_state: set_state.into(), }); } }, Some(2) => { + if let Some((new_set, set_state)) = + migrate_from_version2::(backend, &make_genesis_round)? 
+ { + return Ok(PersistentData { + authority_set: new_set.into(), + set_state: set_state.into(), + }); + } + } + Some(3) => { if let Some(set) = load_decode::<_, AuthoritySet>>( backend, AUTHORITY_SET_KEY, @@ -321,7 +416,6 @@ pub(crate) fn load_persistent( return Ok(PersistentData { authority_set: set.into(), - consensus_changes: Arc::new(consensus_changes.into()), set_state: set_state.into(), }); } @@ -359,7 +453,6 @@ pub(crate) fn load_persistent( Ok(PersistentData { authority_set: genesis_set.into(), set_state: genesis_state.into(), - consensus_changes: Arc::new(consensus_changes.into()), }) } @@ -372,7 +465,8 @@ pub(crate) fn update_authority_set( set: &AuthoritySet>, new_set: Option<&NewAuthoritySet>>, write_aux: F -) -> R where +) -> R +where F: FnOnce(&[(&'static [u8], &[u8])]) -> R, { // write new authority set state to disk. @@ -421,21 +515,10 @@ pub(crate) fn write_concluded_round( backend.insert_aux(&[(&key[..], round_data.encode().as_slice())], &[]) } -/// Update the consensus changes. -pub(crate) fn update_consensus_changes( - set: &ConsensusChanges, - write_aux: F -) -> R where - H: Encode + Clone, - N: Encode + Clone, - F: FnOnce(&[(&'static [u8], &[u8])]) -> R, -{ - write_aux(&[(CONSENSUS_CHANGES_KEY, set.encode().as_slice())]) -} - #[cfg(test)] -pub(crate) fn load_authorities(backend: &B) - -> Option> { +pub(crate) fn load_authorities( + backend: &B +) -> Option> { load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY) .expect("backend error") } @@ -494,10 +577,14 @@ mod test { assert_eq!( load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(2), + Some(3), ); - let PersistentData { authority_set, set_state, .. } = load_persistent::( + let PersistentData { + authority_set, + set_state, + .. + } = load_persistent::( &client, H256::random(), 0, @@ -511,6 +598,7 @@ mod test { set_id, ForkTree::new(), Vec::new(), + AuthoritySetChanges::empty(), ).unwrap(), ); @@ -555,6 +643,7 @@ mod test { set_id, ForkTree::new(), Vec::new(), + AuthoritySetChanges::empty(), ).unwrap(); let voter_set_state = V1VoterSetState::Live(round_number, round_state.clone()); @@ -584,10 +673,14 @@ mod test { assert_eq!( load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(2), + Some(3), ); - let PersistentData { authority_set, set_state, .. } = load_persistent::( + let PersistentData { + authority_set, + set_state, + .. + } = load_persistent::( &client, H256::random(), 0, @@ -601,6 +694,7 @@ mod test { set_id, ForkTree::new(), Vec::new(), + AuthoritySetChanges::empty(), ).unwrap(), ); @@ -625,6 +719,79 @@ mod test { ); } + #[test] + fn load_decode_from_v2_migrates_data_format() { + let client = substrate_test_runtime_client::new(); + + let authorities = vec![(AuthorityId::default(), 100)]; + let set_id = 3; + + { + let authority_set = V2AuthoritySet:: { + current_authorities: authorities.clone(), + set_id, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let genesis_state = (H256::random(), 32); + let voter_set_state: VoterSetState = + VoterSetState::live( + set_id, + &authority_set.clone().into(), // Note the conversion! 
+ genesis_state + ); + + client.insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + (VERSION_KEY, 2u32.encode().as_slice()), + ], + &[], + ).unwrap(); + } + + assert_eq!( + load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), + Some(2), + ); + + // should perform the migration + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ).unwrap(); + + assert_eq!( + load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), + Some(3), + ); + + let PersistentData { + authority_set, + .. + } = load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ).unwrap(); + + assert_eq!( + *authority_set.inner().read(), + AuthoritySet::new( + authorities.clone(), + set_id, + ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ).unwrap(), + ); + } + #[test] fn write_read_concluded_rounds() { let client = substrate_test_runtime_client::new(); @@ -645,7 +812,9 @@ mod test { round_number.using_encoded(|n| key.extend(n)); assert_eq!( - load_decode::<_, CompletedRound::>(&client, &key).unwrap(), + load_decode::<_, CompletedRound::>( + &client, &key + ).unwrap(), Some(completed_round), ); } diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 276529d555ffe..1e616f3fa3f17 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Gossip and politeness for polite-grandpa. //! @@ -368,7 +370,7 @@ pub(super) struct NeighborPacket { /// A versioned neighbor packet. #[derive(Debug, Encode, Decode)] pub(super) enum VersionedNeighborPacket { - #[codec(index = "1")] + #[codec(index = 1)] V1(NeighborPacket), } @@ -1413,7 +1415,7 @@ impl GossipValidator { } Err(e) => { message_name = None; - debug!(target: "afg", "Error decoding message: {}", e.what()); + debug!(target: "afg", "Error decoding message: {}", e); telemetry!(CONSENSUS_DEBUG; "afg.err_decoding_msg"; "" => ""); let len = std::cmp::min(i32::max_value() as usize, data.len()) as i32; diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 3daffcb9f2522..d502741465d23 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -68,7 +68,8 @@ mod periodic; #[cfg(test)] pub(crate) mod tests; -pub use sp_finality_grandpa::GRANDPA_ENGINE_ID; +/// Name of the notifications protocol used by Grandpa. Must be registered towards the networking +/// in order for Grandpa to properly function. pub const GRANDPA_PROTOCOL_NAME: &'static str = "/paritytech/grandpa/1"; // cost scalars for reporting peers. @@ -215,9 +216,9 @@ impl> NetworkBridge { let validator = Arc::new(validator); let gossip_engine = Arc::new(Mutex::new(GossipEngine::new( service.clone(), - GRANDPA_ENGINE_ID, GRANDPA_PROTOCOL_NAME, - validator.clone() + validator.clone(), + prometheus_registry, ))); { @@ -721,7 +722,7 @@ impl Sink> for OutgoingMessages ); // announce the block we voted on to our peers. - self.network.lock().announce(target_hash, Vec::new()); + self.network.lock().announce(target_hash, None); // propagate the message to peers let topic = round_topic::(self.round, self.set_id); @@ -846,7 +847,7 @@ fn check_catch_up( } Ok(()) - }; + } check_weight( voters, diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index dadd7deb57fca..377882ed5dd2d 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Periodic rebroadcast of neighbor packets. diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 1a773acd6d0fb..4abea991cec37 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
-// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Tests for the communication portion of the GRANDPA crate. @@ -24,10 +26,11 @@ use sc_network_gossip::Validator; use std::sync::Arc; use sp_keyring::Ed25519Keyring; use parity_scale_codec::Encode; -use sp_runtime::{ConsensusEngineId, traits::NumberFor}; +use sp_runtime::traits::NumberFor; use std::{borrow::Cow, pin::Pin, task::{Context, Poll}}; +use crate::communication::GRANDPA_PROTOCOL_NAME; use crate::environment::SharedVoterSetState; -use sp_finality_grandpa::{AuthorityList, GRANDPA_ENGINE_ID}; +use sp_finality_grandpa::AuthorityList; use super::gossip::{self, GossipValidator}; use super::{VoterSet, Round, SetId}; @@ -55,15 +58,17 @@ impl sc_network_gossip::Network for TestNetwork { let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); } - fn disconnect_peer(&self, _: PeerId) {} + fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + + fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} - fn write_notification(&self, who: PeerId, _: ConsensusEngineId, message: Vec) { + fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) {} + + fn write_notification(&self, who: PeerId, _: Cow<'static, str>, message: Vec) { let _ = self.sender.unbounded_send(Event::WriteNotification(who, message)); } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} - - fn announce(&self, block: Hash, _associated_data: Vec) { + fn announce(&self, block: Hash, _associated_data: Option>) { let _ = self.sender.unbounded_send(Event::Announce(block)); } } @@ -86,7 +91,7 @@ impl sc_network_gossip::ValidatorContext for TestNetwork { >::write_notification( self, who.clone(), - GRANDPA_ENGINE_ID, + GRANDPA_PROTOCOL_NAME.into(), data, ); } @@ -287,20 +292,20 @@ fn good_commit_leads_to_relay() { // Add the sending peer and send the commit let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: sender_id.clone(), - engine_id: GRANDPA_ENGINE_ID, + protocol: GRANDPA_PROTOCOL_NAME.into(), role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: sender_id.clone(), - messages: vec![(GRANDPA_ENGINE_ID, commit_to_send.clone().into())], + messages: vec![(GRANDPA_PROTOCOL_NAME.into(), commit_to_send.clone().into())], }); // Add a random peer which will be the recipient of this message let receiver_id = sc_network::PeerId::random(); let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: receiver_id.clone(), - engine_id: GRANDPA_ENGINE_ID, + protocol: GRANDPA_PROTOCOL_NAME.into(), role: ObservedRole::Full, }); @@ -319,7 +324,7 @@ fn good_commit_leads_to_relay() { sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: receiver_id, - messages: vec![(GRANDPA_ENGINE_ID, msg.encode().into())], + messages: vec![(GRANDPA_PROTOCOL_NAME.into(), msg.encode().into())], }) }; @@ -434,12 +439,12 @@ fn bad_commit_leads_to_report() { Event::EventStream(sender) => { let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: 
sender_id.clone(), - engine_id: GRANDPA_ENGINE_ID, + protocol: GRANDPA_PROTOCOL_NAME.into(), role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: sender_id.clone(), - messages: vec![(GRANDPA_ENGINE_ID, commit_to_send.clone().into())], + messages: vec![(GRANDPA_PROTOCOL_NAME.into(), commit_to_send.clone().into())], }); true diff --git a/client/finality-grandpa/src/consensus_changes.rs b/client/finality-grandpa/src/consensus_changes.rs deleted file mode 100644 index 1ce7b551d0d7c..0000000000000 --- a/client/finality-grandpa/src/consensus_changes.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use std::sync::Arc; -use parity_scale_codec::{Encode, Decode}; - -/// Consensus-related data changes tracker. -#[derive(Clone, Debug, Encode, Decode)] -pub(crate) struct ConsensusChanges { - pending_changes: Vec<(N, H)>, -} - -impl ConsensusChanges { - /// Create empty consensus changes. - pub(crate) fn empty() -> Self { - ConsensusChanges { pending_changes: Vec::new(), } - } -} - -impl ConsensusChanges { - - /// Returns reference to all pending changes. - pub fn pending_changes(&self) -> &[(N, H)] { - &self.pending_changes - } - - /// Note unfinalized change of consensus-related data. - pub(crate) fn note_change(&mut self, at: (N, H)) { - let idx = self.pending_changes - .binary_search_by_key(&at.0, |change| change.0) - .unwrap_or_else(|i| i); - self.pending_changes.insert(idx, at); - } - - /// Finalize all pending consensus changes that are finalized by given block. - /// Returns true if there any changes were finalized. - pub(crate) fn finalize ::sp_blockchain::Result>>( - &mut self, - block: (N, H), - canonical_at_height: F, - ) -> ::sp_blockchain::Result<(bool, bool)> { - let (split_idx, has_finalized_changes) = self.pending_changes.iter() - .enumerate() - .take_while(|(_, &(at_height, _))| at_height <= block.0) - .fold((None, Ok(false)), |(_, has_finalized_changes), (idx, ref at)| - ( - Some(idx), - has_finalized_changes - .and_then(|has_finalized_changes| if has_finalized_changes { - Ok(has_finalized_changes) - } else { - canonical_at_height(at.0).map(|can_hash| Some(at.1) == can_hash) - }), - )); - - let altered_changes = split_idx.is_some(); - if let Some(split_idx) = split_idx { - self.pending_changes = self.pending_changes.split_off(split_idx + 1); - } - has_finalized_changes.map(|has_finalized_changes| (altered_changes, has_finalized_changes)) - } -} - -/// Thread-safe consensus changes tracker reference. -pub(crate) type SharedConsensusChanges = Arc>>; diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 7f9e966c9acc2..5e4203b2a40f6 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
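The test network above now addresses every notification by protocol name instead of the removed `ConsensusEngineId`. A compile-checked sketch of that shape, using stand-in types rather than the real `sc_network_gossip::Network` trait (the protocol string below is a placeholder, not the actual `GRANDPA_PROTOCOL_NAME` value):

use std::borrow::Cow;

// Stand-in for sc_network::PeerId.
type PeerId = String;

// Simplified mirror of the notification methods exercised in the tests: each call now
// carries an explicit protocol name rather than a four-byte ConsensusEngineId.
trait NotificationNetwork {
    fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec<u8>);
    fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>);
    fn announce(&self, block_hash: Vec<u8>, associated_data: Option<Vec<u8>>);
}

struct NoopNetwork;

impl NotificationNetwork for NoopNetwork {
    fn write_notification(&self, _who: PeerId, _protocol: Cow<'static, str>, _message: Vec<u8>) {}
    fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) {}
    fn announce(&self, _block_hash: Vec<u8>, _associated_data: Option<Vec<u8>>) {}
}

fn main() {
    let net = NoopNetwork;
    // Placeholder protocol name; the real code passes GRANDPA_PROTOCOL_NAME.into().
    net.write_notification("peer-1".into(), Cow::Borrowed("/example/grandpa/1"), Vec::new());
}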
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,28 +16,28 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::iter::FromIterator; +use std::marker::PhantomData; use std::pin::Pin; use std::sync::Arc; use std::time::Duration; -use log::{debug, warn}; -use parity_scale_codec::{Decode, Encode}; use futures::prelude::*; use futures_timer::Delay; +use log::{debug, warn}; +use parity_scale_codec::{Decode, Encode}; use parking_lot::RwLock; -use std::marker::PhantomData; use sc_client_api::{backend::{Backend, apply_aux}, utils::is_descendent_of}; use finality_grandpa::{ BlockNumberOps, Error as GrandpaError, round::State as RoundState, voter, voter_set::VoterSet, }; -use sp_blockchain::{HeaderBackend, HeaderMetadata, Error as ClientError}; +use sp_blockchain::HeaderMetadata; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, One, Zero, + Block as BlockT, Header as HeaderT, NumberFor, Zero, }; use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO}; @@ -50,7 +50,6 @@ use sp_consensus::SelectChain; use crate::authorities::{AuthoritySet, SharedAuthoritySet}; use crate::communication::Network as NetworkT; -use crate::consensus_changes::SharedConsensusChanges; use crate::notification::GrandpaJustificationSender; use crate::justification::GrandpaJustification; use crate::until_imported::UntilVoteTargetImported; @@ -331,7 +330,11 @@ impl HasVoted { /// A voter set state meant to be shared safely across multiple owners. #[derive(Clone)] pub struct SharedVoterSetState { + /// The inner shared `VoterSetState`. inner: Arc>>, + /// A tracker for the rounds that we are actively participating on (i.e. voting) + /// and the authority id under which we are doing it. + voting: Arc>>, } impl From> for SharedVoterSetState { @@ -343,7 +346,10 @@ impl From> for SharedVoterSetState { impl SharedVoterSetState { /// Create a new shared voter set tracker with the given state. pub(crate) fn new(state: VoterSetState) -> Self { - SharedVoterSetState { inner: Arc::new(RwLock::new(state)) } + SharedVoterSetState { + inner: Arc::new(RwLock::new(state)), + voting: Arc::new(RwLock::new(HashMap::new())), + } } /// Read the inner voter set state. @@ -351,6 +357,23 @@ impl SharedVoterSetState { self.inner.read() } + /// Get the authority id that we are using to vote on the given round, if any. + pub(crate) fn voting_on(&self, round: RoundNumber) -> Option { + self.voting.read().get(&round).cloned() + } + + /// Note that we started voting on the give round with the given authority id. + pub(crate) fn started_voting_on(&self, round: RoundNumber, local_id: AuthorityId) { + self.voting.write().insert(round, local_id); + } + + /// Note that we have finished voting on the given round. If we were voting on + /// the given round, the authority id that we were using to do it will be + /// cleared. + pub(crate) fn finished_voting_on(&self, round: RoundNumber) { + self.voting.write().remove(&round); + } + /// Return vote status information for the current round. 
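The `voting` map added to `SharedVoterSetState` above records, per round, which authority id the node is actively voting with; `voting_on`, `started_voting_on` and `finished_voting_on` query, populate and clear it. A self-contained sketch of the same pattern, assuming `std::sync::RwLock`, `u64` rounds and `String` ids as stand-ins for `parking_lot::RwLock`, `RoundNumber` and `AuthorityId`:

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

type Round = u64; // stand-in for RoundNumber
type Id = String; // stand-in for AuthorityId

#[derive(Clone, Default)]
struct RoundVoting {
    // Rounds we are actively voting on, and the id used for each one.
    voting: Arc<RwLock<HashMap<Round, Id>>>,
}

impl RoundVoting {
    fn started_voting_on(&self, round: Round, id: Id) {
        self.voting.write().unwrap().insert(round, id);
    }
    fn voting_on(&self, round: Round) -> Option<Id> {
        self.voting.read().unwrap().get(&round).cloned()
    }
    fn finished_voting_on(&self, round: Round) {
        self.voting.write().unwrap().remove(&round);
    }
}

fn main() {
    let rounds = RoundVoting::default();
    rounds.started_voting_on(1, "alice".into());
    assert_eq!(rounds.voting_on(1), Some("alice".into()));
    rounds.finished_voting_on(1);
    assert!(rounds.voting_on(1).is_none());
}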
pub(crate) fn has_voted(&self, round: RoundNumber) -> HasVoted { match &*self.inner.read() { @@ -416,7 +439,6 @@ pub(crate) struct Environment, SC, pub(crate) voters: Arc>, pub(crate) config: Config, pub(crate) authority_set: SharedAuthoritySet>, - pub(crate) consensus_changes: SharedConsensusChanges>, pub(crate) network: crate::communication::NetworkBridge, pub(crate) set_id: SetId, pub(crate) voter_set_state: SharedVoterSetState, @@ -471,7 +493,7 @@ where &self, equivocation: Equivocation>, ) -> Result<(), Error> { - if let Some(local_id) = local_authority_id(&self.voters, self.config.keystore.as_ref()) { + if let Some(local_id) = self.voter_set_state.voting_on(equivocation.round_number()) { if *equivocation.offender() == local_id { return Err(Error::Safety( "Refraining from sending equivocation report for our own equivocation.".into(), @@ -745,6 +767,17 @@ where HasVoted::No => HasVoted::No, }; + // NOTE: we cache the local authority id that we'll be using to vote on the + // given round. this is done to make sure we only check for available keys + // from the keystore in this method when beginning the round, otherwise if + // the keystore state changed during the round (e.g. a key was removed) it + // could lead to internal state inconsistencies in the voter environment + // (e.g. we wouldn't update the voter set state after prevoting since there's + // no local authority id). + if let Some(id) = local_id.as_ref() { + self.voter_set_state.started_voting_on(round, id.clone()); + } + // we can only sign when we have a local key in the authority set // and we have a reference to the keystore. let keystore = match (local_id.as_ref(), self.config.keystore.as_ref()) { @@ -783,10 +816,12 @@ where } } - fn proposed(&self, round: RoundNumber, propose: PrimaryPropose) -> Result<(), Self::Error> { - let local_id = local_authority_id(&self.voters, self.config.keystore.as_ref()); - - let local_id = match local_id { + fn proposed( + &self, + round: RoundNumber, + propose: PrimaryPropose, + ) -> Result<(), Self::Error> { + let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, None => return Ok(()), }; @@ -823,9 +858,7 @@ where } fn prevoted(&self, round: RoundNumber, prevote: Prevote) -> Result<(), Self::Error> { - let local_id = local_authority_id(&self.voters, self.config.keystore.as_ref()); - - let local_id = match local_id { + let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, None => return Ok(()), }; @@ -884,9 +917,7 @@ where round: RoundNumber, precommit: Precommit, ) -> Result<(), Self::Error> { - let local_id = local_authority_id(&self.voters, self.config.keystore.as_ref()); - - let local_id = match local_id { + let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, None => return Ok(()), }; @@ -1010,6 +1041,9 @@ where Ok(Some(set_state)) })?; + // clear any cached local authority id associated with this round + self.voter_set_state.finished_voting_on(round); + Ok(()) } @@ -1079,7 +1113,6 @@ where finalize_block( self.client.clone(), &self.authority_set, - &self.consensus_changes, Some(self.config.justification_period.into()), hash, number, @@ -1144,7 +1177,6 @@ impl From> for JustificationOrCommit< pub(crate) fn finalize_block( client: Arc, authority_set: &SharedAuthoritySet>, - consensus_changes: &SharedConsensusChanges>, justification_period: Option>, hash: Block::Hash, number: NumberFor, @@ -1179,15 +1211,6 @@ where // FIXME #1483: clone only when changed let old_authority_set = authority_set.clone(); - // holds the 
old consensus changes in case it is changed below, needed for - // reverting in case of failure - let mut old_consensus_changes = None; - - let mut consensus_changes = consensus_changes.lock(); - let canon_at_height = |canon_number| { - // "true" because the block is finalized - canonical_at_height(&*client, (hash, number), true, canon_number) - }; let update_res: Result<_, Error> = client.lock_import_and_run(|import_op| { let status = authority_set.apply_standard_changes( @@ -1197,26 +1220,6 @@ where initial_sync, ).map_err(|e| Error::Safety(e.to_string()))?; - // check if this is this is the first finalization of some consensus changes - let (alters_consensus_changes, finalizes_consensus_changes) = consensus_changes - .finalize((number, hash), &canon_at_height)?; - - if alters_consensus_changes { - old_consensus_changes = Some(consensus_changes.clone()); - - let write_result = crate::aux_schema::update_consensus_changes( - &*consensus_changes, - |insert| apply_aux(import_op, insert, &[]), - ); - - if let Err(e) = write_result { - warn!(target: "afg", "Failed to write updated consensus changes to disk. Bailing."); - warn!(target: "afg", "Node is in a potentially inconsistent state."); - - return Err(e.into()); - } - } - // send a justification notification if a sender exists and in case of error log it. fn notify_justification( justification_sender: Option<&GrandpaJustificationSender>, @@ -1244,9 +1247,7 @@ where let mut justification_required = // justification is always required when block that enacts new authorities // set is finalized - status.new_set_block.is_some() || - // justification is required when consensus changes are finalized - finalizes_consensus_changes; + status.new_set_block.is_some(); // justification is required every N blocks to be able to prove blocks // finalization to remote nodes @@ -1351,57 +1352,7 @@ where Err(e) => { *authority_set = old_authority_set; - if let Some(old_consensus_changes) = old_consensus_changes { - *consensus_changes = old_consensus_changes; - } - Err(CommandOrError::Error(e)) } } } - -/// Using the given base get the block at the given height on this chain. The -/// target block must be an ancestor of base, therefore `height <= base.height`. -pub(crate) fn canonical_at_height>( - provider: &C, - base: (Block::Hash, NumberFor), - base_is_canonical: bool, - height: NumberFor, -) -> Result, ClientError> { - if height > base.1 { - return Ok(None); - } - - if height == base.1 { - if base_is_canonical { - return Ok(Some(base.0)); - } else { - return Ok(provider.hash(height).unwrap_or(None)); - } - } else if base_is_canonical { - return Ok(provider.hash(height).unwrap_or(None)); - } - - let one = NumberFor::::one(); - - // start by getting _canonical_ block with number at parent position and then iterating - // backwards by hash. - let mut current = match provider.header(BlockId::Number(base.1 - one))? { - Some(header) => header, - _ => return Ok(None), - }; - - // we've already checked that base > height above. - let mut steps = base.1 - height - one; - - while steps > NumberFor::::zero() { - current = match provider.header(BlockId::Hash(*current.parent_hash()))? 
{ - Some(header) => header, - _ => return Ok(None), - }; - - steps -= one; - } - - Ok(Some(current.hash())) -} diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 33dd69cc11d6e..e1e424472ff98 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -36,556 +36,474 @@ //! finality proof (that finalizes some block C that is ancestor of the B and descendant //! of the U) could be returned. +use log::trace; use std::sync::Arc; -use log::{trace, warn}; -use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; -use sc_client_api::{ - backend::Backend, StorageProof, - light::{FetchChecker, RemoteReadRequest}, - StorageProvider, ProofProvider, -}; -use parity_scale_codec::{Encode, Decode}; use finality_grandpa::BlockNumberOps; +use parity_scale_codec::{Encode, Decode}; +use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; use sp_runtime::{ Justification, generic::BlockId, - traits::{NumberFor, Block as BlockT, Header as HeaderT, One}, + traits::{NumberFor, Block as BlockT, Header as HeaderT, Zero, One}, }; -use sp_core::storage::StorageKey; -use sc_telemetry::{telemetry, CONSENSUS_INFO}; -use sp_finality_grandpa::{AuthorityId, AuthorityList, VersionedAuthorityList, GRANDPA_AUTHORITIES_KEY}; +use sc_client_api::backend::Backend; +use sp_finality_grandpa::{AuthorityId, AuthorityList}; +use crate::authorities::AuthoritySetChanges; use crate::justification::GrandpaJustification; +use crate::SharedAuthoritySet; use crate::VoterSet; -/// Maximum number of fragments that we want to return in a single prove_finality call. -const MAX_FRAGMENTS_IN_PROOF: usize = 8; - -/// GRANDPA authority set related methods for the finality proof provider. -pub trait AuthoritySetForFinalityProver: Send + Sync { - /// Read GRANDPA_AUTHORITIES_KEY from storage at given block. - fn authorities(&self, block: &BlockId) -> ClientResult; - /// Prove storage read of GRANDPA_AUTHORITIES_KEY at given block. - fn prove_authorities(&self, block: &BlockId) -> ClientResult; -} - -/// Trait that combines `StorageProvider` and `ProofProvider` -pub trait StorageAndProofProvider: StorageProvider + ProofProvider + Send + Sync - where - Block: BlockT, - BE: Backend + Send + Sync, -{} - -/// Blanket implementation. -impl StorageAndProofProvider for P - where - Block: BlockT, - BE: Backend + Send + Sync, - P: StorageProvider + ProofProvider + Send + Sync, -{} - -/// Implementation of AuthoritySetForFinalityProver. -impl AuthoritySetForFinalityProver for Arc> - where - BE: Backend + Send + Sync + 'static, -{ - fn authorities(&self, block: &BlockId) -> ClientResult { - let storage_key = StorageKey(GRANDPA_AUTHORITIES_KEY.to_vec()); - self.storage(block, &storage_key)? 
- .and_then(|encoded| VersionedAuthorityList::decode(&mut encoded.0.as_slice()).ok()) - .map(|versioned| versioned.into()) - .ok_or(ClientError::InvalidAuthoritiesSet) - } - - fn prove_authorities(&self, block: &BlockId) -> ClientResult { - self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY)) - } -} - -/// GRANDPA authority set related methods for the finality proof checker. -pub trait AuthoritySetForFinalityChecker: Send + Sync { - /// Check storage read proof of GRANDPA_AUTHORITIES_KEY at given block. - fn check_authorities_proof( - &self, - hash: Block::Hash, - header: Block::Header, - proof: StorageProof, - ) -> ClientResult; -} - -/// FetchChecker-based implementation of AuthoritySetForFinalityChecker. -impl AuthoritySetForFinalityChecker for Arc> { - fn check_authorities_proof( - &self, - hash: Block::Hash, - header: Block::Header, - proof: StorageProof, - ) -> ClientResult { - let storage_key = GRANDPA_AUTHORITIES_KEY.to_vec(); - let request = RemoteReadRequest { - block: hash, - header, - keys: vec![storage_key.clone()], - retry_count: None, - }; - - self.check_read_proof(&request, proof) - .and_then(|results| { - let maybe_encoded = results.get(&storage_key) - .expect( - "storage_key is listed in the request keys; \ - check_read_proof must return a value for each requested key; - qed" - ); - maybe_encoded - .as_ref() - .and_then(|encoded| { - VersionedAuthorityList::decode(&mut encoded.as_slice()).ok() - }) - .map(|versioned| versioned.into()) - .ok_or(ClientError::InvalidAuthoritiesSet) - }) - } -} +const MAX_UNKNOWN_HEADERS: usize = 100_000; /// Finality proof provider for serving network requests. -pub struct FinalityProofProvider { - backend: Arc, - authority_provider: Arc>, +pub struct FinalityProofProvider { + backend: Arc, + shared_authority_set: Option>>, } impl FinalityProofProvider - where B: Backend + Send + Sync + 'static +where + B: Backend + Send + Sync + 'static, { /// Create new finality proof provider using: /// /// - backend for accessing blockchain data; /// - authority_provider for calling and proving runtime methods. - pub fn new
<P>
( + /// - shared_authority_set for accessing authority set data + pub fn new( backend: Arc, - authority_provider: P, - ) -> Self - where P: AuthoritySetForFinalityProver + 'static, - { - FinalityProofProvider { backend, authority_provider: Arc::new(authority_provider) } + shared_authority_set: Option>>, + ) -> Self { + FinalityProofProvider { + backend, + shared_authority_set, + } } /// Create new finality proof provider for the service using: /// /// - backend for accessing blockchain data; - /// - storage_and_proof_provider, which is generally a client. + /// - storage_provider, which is generally a client. + /// - shared_authority_set for accessing authority set data pub fn new_for_service( backend: Arc, - storage_and_proof_provider: Arc>, + shared_authority_set: Option>>, ) -> Arc { - Arc::new(Self::new(backend, storage_and_proof_provider)) + Arc::new(Self::new(backend, shared_authority_set)) } } impl FinalityProofProvider - where - Block: BlockT, - NumberFor: BlockNumberOps, - B: Backend + Send + Sync + 'static, +where + Block: BlockT, + NumberFor: BlockNumberOps, + B: Backend + Send + Sync + 'static, { - /// Prove finality for the range (begin; end] hash. Returns None if there are no finalized blocks - /// unknown in the range. + /// Prove finality for the given block number by returning a Justification for the last block of + /// the authority set. pub fn prove_finality( &self, - begin: Block::Hash, - end: Block::Hash, - authorities_set_id: u64, - ) -> Result>, ClientError> { + block: NumberFor + ) -> Result>, FinalityProofError> { + let authority_set_changes = if let Some(changes) = self + .shared_authority_set + .as_ref() + .map(SharedAuthoritySet::authority_set_changes) + { + changes + } else { + return Ok(None); + }; + prove_finality::<_, _, GrandpaJustification>( &*self.backend.blockchain(), - &*self.authority_provider, - authorities_set_id, - begin, - end, + authority_set_changes, + block, ) } } -impl sc_network::config::FinalityProofProvider for FinalityProofProvider - where - Block: BlockT, - NumberFor: BlockNumberOps, - B: Backend + Send + Sync + 'static, -{ - fn prove_finality( - &self, - for_block: Block::Hash, - request: &[u8], - ) -> Result>, ClientError> { - let request: FinalityProofRequest = Decode::decode(&mut &request[..]) - .map_err(|e| { - warn!(target: "afg", "Unable to decode finality proof request: {}", e.what()); - ClientError::Backend("Invalid finality proof request".to_string()) - })?; - match request { - FinalityProofRequest::Original(request) => prove_finality::<_, _, GrandpaJustification>( - &*self.backend.blockchain(), - &*self.authority_provider, - request.authorities_set_id, - request.last_finalized, - for_block, - ), - } - } -} - -/// The effects of block finality. -#[derive(Debug, PartialEq)] -pub struct FinalityEffects { - /// The (ordered) set of headers that could be imported. - pub headers_to_import: Vec
<Header>
, - /// The hash of the block that could be finalized. - pub block: Header::Hash, - /// The justification for the block. - pub justification: Vec, - /// New authorities set id that should be applied starting from block. - pub new_set_id: u64, - /// New authorities set that should be applied starting from block. - pub new_authorities: AuthorityList, -} - -/// Single fragment of proof-of-finality. -/// /// Finality for block B is proved by providing: /// 1) the justification for the descendant block F; /// 2) headers sub-chain (B; F] if B != F; -/// 3) proof of GRANDPA::authorities() if the set changes at block F. #[derive(Debug, PartialEq, Encode, Decode, Clone)] -pub struct FinalityProofFragment { +pub struct FinalityProof { /// The hash of block F for which justification is provided. pub block: Header::Hash, /// Justification of the block F. pub justification: Vec, - /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. + /// The set of headers in the range (B; F] that we believe are unknown to the caller. Ordered. pub unknown_headers: Vec
, - /// Optional proof of execution of GRANDPA::authorities() at the `block`. - pub authorities_proof: Option, } -/// Proof of finality is the ordered set of finality fragments, where: -/// - last fragment provides justification for the best possible block from the requested range; -/// - all other fragments provide justifications for GRANDPA authorities set changes within requested range. -type FinalityProof
= Vec>; - -/// Finality proof request data. -#[derive(Debug, Encode, Decode)] -enum FinalityProofRequest { - /// Original version of the request. - Original(OriginalFinalityProofRequest), +/// Errors occurring when trying to prove finality +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum FinalityProofError { + /// The requested block has not yet been finalized. + #[display(fmt = "Block not yet finalized")] + BlockNotYetFinalized, + /// The requested block is not covered by authority set changes. Likely this means the block is + /// in the latest authority set, and the subscription API is more appropriate. + #[display(fmt = "Block not covered by authority set changes")] + BlockNotInAuthoritySetChanges, + /// Errors originating from the client. + Client(sp_blockchain::Error), } -/// Original version of finality proof request. -#[derive(Debug, Encode, Decode)] -struct OriginalFinalityProofRequest { - /// The authorities set id we are waiting proof from. - /// - /// The first justification in the proof must be signed by this authority set. - pub authorities_set_id: u64, - /// Hash of the last known finalized block. - pub last_finalized: H, +/// Single fragment of authority set proof. +/// +/// Finality for block B is proved by providing: +/// 1) headers of this block; +/// 2) the justification for the block containing a authority set change digest; +#[derive(Debug, PartialEq, Clone, Encode, Decode)] +pub(crate) struct AuthoritySetProofFragment { + /// The header of the given block. + pub header: Header, + /// Justification of the block F. + pub justification: Vec, } -/// Prepare data blob associated with finality proof request. -pub(crate) fn make_finality_proof_request(last_finalized: H, authorities_set_id: u64) -> Vec { - FinalityProofRequest::Original(OriginalFinalityProofRequest { - authorities_set_id, - last_finalized, - }).encode() -} +/// Proof of authority set is the ordered set of authority set fragments, where: +/// - last fragment match target block. +type AuthoritySetProof
= Vec>; -/// Prepare proof-of-finality for the best possible block in the range: (begin; end]. -/// -/// It is assumed that the caller already have a proof-of-finality for the block 'begin'. -/// It is assumed that the caller already knows all blocks in the range (begin; end]. -/// -/// Returns None if there are no finalized blocks unknown to the caller. -pub(crate) fn prove_finality, J>( +fn prove_finality( blockchain: &B, - authorities_provider: &dyn AuthoritySetForFinalityProver, - authorities_set_id: u64, - begin: Block::Hash, - end: Block::Hash, -) -> ::sp_blockchain::Result>> - where - J: ProvableJustification, + authority_set_changes: AuthoritySetChanges>, + block: NumberFor, +) -> Result>, FinalityProofError> +where + Block: BlockT, + B: BlockchainBackend, + J: ProvableJustification, { - let begin_id = BlockId::Hash(begin); - let begin_number = blockchain.expect_block_number_from_id(&begin_id)?; - - // early-return if we sure that there are no blocks finalized AFTER begin block + // Early-return if we sure that there are no blocks finalized AFTER begin block let info = blockchain.info(); - if info.finalized_number <= begin_number { - trace!( - target: "afg", - "Requested finality proof for descendant of #{} while we only have finalized #{}. Returning empty proof.", - begin_number, + if info.finalized_number <= block { + let err = format!( + "Requested finality proof for descendant of #{} while we only have finalized #{}.", + block, info.finalized_number, ); - - return Ok(None); - } - - // check if blocks range is valid. It is the caller responsibility to ensure - // that it only asks peers that know about whole blocks range - let end_number = blockchain.expect_block_number_from_id(&BlockId::Hash(end))?; - if begin_number + One::one() > end_number { - return Err(ClientError::Backend( - format!("Cannot generate finality proof for invalid range: {}..{}", begin_number, end_number), - )); + trace!(target: "afg", "{}", &err); + return Err(FinalityProofError::BlockNotYetFinalized); } - // early-return if we sure that the block is NOT a part of canonical chain - let canonical_begin = blockchain.expect_block_hash_from_id(&BlockId::Number(begin_number))?; - if begin != canonical_begin { - return Err(ClientError::Backend( - format!("Cannot generate finality proof for non-canonical block: {}", begin), - )); - } + // Get set_id the block belongs to, and the last block of the set which should contain a + // Justification we can use to prove the requested block. + let (_, last_block_for_set) = if let Some(id) = authority_set_changes.get_set_id(block) { + id + } else { + trace!( + target: "afg", + "AuthoritySetChanges does not cover the requested block #{}. \ + Maybe the subscription API is more appropriate.", + block, + ); + return Err(FinalityProofError::BlockNotInAuthoritySetChanges); + }; + + // Get the Justification stored at the last block of the set + let last_block_for_set_id = BlockId::Number(last_block_for_set); + let justification = + if let Some(justification) = blockchain.justification(last_block_for_set_id)? { + justification + } else { + trace!( + target: "afg", + "No justification found when making finality proof for {}. 
Returning empty proof.", + block, + ); + return Ok(None); + }; - // iterate justifications && try to prove finality - let mut fragment_index = 0; - let mut current_authorities = authorities_provider.authorities(&begin_id)?; - let mut current_number = begin_number + One::one(); - let mut finality_proof = Vec::new(); - let mut unknown_headers = Vec::new(); - let mut latest_proof_fragment = None; - let begin_authorities = current_authorities.clone(); - loop { - let current_id = BlockId::Number(current_number); - - // check if header is unknown to the caller - if current_number > end_number { - let unknown_header = blockchain.expect_header(current_id)?; - unknown_headers.push(unknown_header); + // Collect all headers from the requested block until the last block of the set + let unknown_headers = { + let mut headers = Vec::new(); + let mut current = block + One::one(); + loop { + if current >= last_block_for_set || headers.len() >= MAX_UNKNOWN_HEADERS { + break; + } + headers.push(blockchain.expect_header(BlockId::Number(current))?); + current += One::one(); } + headers + }; + + Ok(Some( + FinalityProof { + block: blockchain.expect_block_hash_from_id(&last_block_for_set_id)?, + justification, + unknown_headers, + } + .encode(), + )) +} - if let Some(justification) = blockchain.justification(current_id)? { - // check if the current block enacts new GRANDPA authorities set - let new_authorities = authorities_provider.authorities(¤t_id)?; - let new_authorities_proof = if current_authorities != new_authorities { - current_authorities = new_authorities; - Some(authorities_provider.prove_authorities(¤t_id)?) - } else { - None - }; - - // prepare finality proof for the current block - let current = blockchain.expect_block_hash_from_id(&BlockId::Number(current_number))?; - let proof_fragment = FinalityProofFragment { - block: current, - justification, - unknown_headers: ::std::mem::take(&mut unknown_headers), - authorities_proof: new_authorities_proof, - }; - - // append justification to finality proof if required - let justifies_end_block = current_number >= end_number; - let justifies_authority_set_change = proof_fragment.authorities_proof.is_some(); - if justifies_end_block || justifies_authority_set_change { - // check if the proof is generated by the requested authority set - if finality_proof.is_empty() { - let justification_check_result = J::decode_and_verify( - &proof_fragment.justification, - authorities_set_id, - &begin_authorities, - ); - if justification_check_result.is_err() { - trace!( - target: "afg", - "Can not provide finality proof with requested set id #{}\ - (possible forced change?). Returning empty proof.", - authorities_set_id, - ); - - return Ok(None); - } - } - - finality_proof.push(proof_fragment); - latest_proof_fragment = None; +/// Prepare authority proof for the best possible block starting at a given trusted block. +/// +/// Started block should be in range of bonding duration. +/// We only return proof for finalized blocks (with justification). +/// +/// It is assumed that the caller already have a proof-of-finality for the block 'begin'. +pub fn prove_warp_sync>( + blockchain: &B, + begin: Block::Hash, + max_fragment_limit: Option, + mut cache: Option<&mut WarpSyncFragmentCache>, +) -> ::sp_blockchain::Result> { + + let begin = BlockId::Hash(begin); + let begin_number = blockchain.block_number_from_id(&begin)? 
+ .ok_or_else(|| ClientError::Backend("Missing start block".to_string()))?; + let end = BlockId::Hash(blockchain.last_finalized()?); + let end_number = blockchain.block_number_from_id(&end)? + // This error should not happen, we could also panic. + .ok_or_else(|| ClientError::Backend("Missing last finalized block".to_string()))?; + + if begin_number > end_number { + return Err(ClientError::Backend("Unfinalized start for authority proof".to_string())); + } + + let mut result = Vec::new(); + let mut last_apply = None; + + let header = blockchain.expect_header(begin)?; + let mut index = *header.number(); + + // Find previous change in case there is a delay. + // This operation is a costy and only for the delay corner case. + while index > Zero::zero() { + index = index - One::one(); + if let Some((fragment, apply_block)) = get_warp_sync_proof_fragment(blockchain, index, &mut cache)? { + if last_apply.map(|next| &next > header.number()).unwrap_or(false) { + result.push(fragment); + last_apply = Some(apply_block); } else { - latest_proof_fragment = Some(proof_fragment); - } - - // we don't need to provide more justifications - if justifies_end_block { break; } } + } - // we can't provide more justifications - if current_number == info.finalized_number { - // append last justification - even if we can't generate finality proof for - // the end block, we try to generate it for the latest possible block - if let Some(latest_proof_fragment) = latest_proof_fragment.take() { - finality_proof.push(latest_proof_fragment); + let mut index = *header.number(); + while index <= end_number { + if max_fragment_limit.map(|limit| result.len() >= limit).unwrap_or(false) { + break; + } - fragment_index += 1; - if fragment_index == MAX_FRAGMENTS_IN_PROOF { - break; - } + if let Some((fragement, apply_block)) = get_warp_sync_proof_fragment(blockchain, index, &mut cache)? { + if last_apply.map(|next| apply_block < next).unwrap_or(false) { + // Previous delayed will not apply, do not include it. + result.pop(); } - break; + result.push(fragement); + last_apply = Some(apply_block); } - // else search for the next justification - current_number += One::one(); + index = index + One::one(); } - if finality_proof.is_empty() { - trace!( - target: "afg", - "No justifications found when making finality proof for {}. Returning empty proof.", - end, - ); + let at_limit = max_fragment_limit.map(|limit| result.len() >= limit).unwrap_or(false); - Ok(None) - } else { - trace!( - target: "afg", - "Built finality proof for {} of {} fragments. Last fragment for {}.", - end, - finality_proof.len(), - finality_proof.last().expect("checked that !finality_proof.is_empty(); qed").block, - ); + // add last finalized block if reached and not already included. + if !at_limit && result.last().as_ref().map(|head| head.header.number()) != Some(&end_number) { + let header = blockchain.expect_header(end)?; + if let Some(justification) = blockchain.justification(BlockId::Number(end_number.clone()))? { + result.push(AuthoritySetProofFragment { + header: header.clone(), + justification, + }); + } else { + // no justification, don't include it. + } + } + + Ok(result.encode()) +} + +/// Try get a warp sync proof fragment a a given finalized block. 
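The new `prove_finality` above only attaches the headers in the open interval between the requested block and the last block of its authority set, capped by `MAX_UNKNOWN_HEADERS`. A minimal sketch of that bound on plain `u64` block numbers (the real code fetches the corresponding headers from the blockchain backend):

// Stand-alone sketch of the unknown-headers collection bound.
const MAX_UNKNOWN_HEADERS: usize = 100_000;

fn unknown_header_numbers(block: u64, last_block_for_set: u64) -> Vec<u64> {
    let mut headers = Vec::new();
    let mut current = block + 1;
    // Stop before the last block of the set (its justification is shipped separately)
    // and never collect more than MAX_UNKNOWN_HEADERS entries.
    while current < last_block_for_set && headers.len() < MAX_UNKNOWN_HEADERS {
        headers.push(current);
        current += 1;
    }
    headers
}

fn main() {
    // Proving block 10 in a set whose last block is 15 attaches headers 11 through 14.
    assert_eq!(unknown_header_numbers(10, 15), vec![11, 12, 13, 14]);
}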
+fn get_warp_sync_proof_fragment>( + blockchain: &B, + index: NumberFor, + cache: &mut Option<&mut WarpSyncFragmentCache>, +) -> sp_blockchain::Result, NumberFor)>> { + if let Some(cache) = cache.as_mut() { + if let Some(result) = cache.get_item(index) { + return Ok(result); + } + } + + let mut result = None; + let header = blockchain.expect_header(BlockId::number(index))?; + + if let Some((block_number, sp_finality_grandpa::ScheduledChange { + next_authorities: _, + delay, + })) = crate::import::find_forced_change::(&header) { + let dest = block_number + delay; + if let Some(justification) = blockchain.justification(BlockId::Number(index.clone()))? { + result = Some((AuthoritySetProofFragment { + header: header.clone(), + justification, + }, dest)); + } else { + return Err(ClientError::Backend("Unjustified block with authority set change".to_string())); + } + } - Ok(Some(finality_proof.encode())) + if let Some(sp_finality_grandpa::ScheduledChange { + next_authorities: _, + delay, + }) = crate::import::find_scheduled_change::(&header) { + let dest = index + delay; + if let Some(justification) = blockchain.justification(BlockId::Number(index.clone()))? { + result = Some((AuthoritySetProofFragment { + header: header.clone(), + justification, + }, dest)); + } else { + return Err(ClientError::Backend("Unjustified block with authority set change".to_string())); + } } + + cache.as_mut().map(|cache| cache.new_item(index, result.clone())); + Ok(result) } -/// Check GRANDPA proof-of-finality for the given block. +/// Check GRANDPA authority change sequence to assert finality of a target block. /// -/// Returns the vector of headers that MUST be validated + imported -/// AND if at least one of those headers is invalid, all other MUST be considered invalid. -pub(crate) fn check_finality_proof( - blockchain: &B, +/// Returns the header of the target block. +#[allow(unused)] +pub(crate) fn check_warp_sync_proof( current_set_id: u64, current_authorities: AuthorityList, - authorities_provider: &dyn AuthoritySetForFinalityChecker, remote_proof: Vec, -) -> ClientResult> - where +) -> ClientResult<(Block::Header, u64, AuthorityList)> +where NumberFor: BlockNumberOps, - B: BlockchainBackend, - J: ProvableJustification, + J: Decode + ProvableJustification + BlockJustification, { // decode finality proof - let proof = FinalityProof::::decode(&mut &remote_proof[..]) - .map_err(|_| ClientError::BadJustification("failed to decode finality proof".into()))?; + let proof = AuthoritySetProof::::decode(&mut &remote_proof[..]) + .map_err(|_| ClientError::BadJustification("failed to decode authority proof".into()))?; - // empty proof can't prove anything - if proof.is_empty() { - return Err(ClientError::BadJustification("empty proof of finality".into())); - } + let last = proof.len() - 1; - // iterate and verify proof fragments - let last_fragment_index = proof.len() - 1; - let mut authorities = AuthoritiesOrEffects::Authorities(current_set_id, current_authorities); - for (proof_fragment_index, proof_fragment) in proof.into_iter().enumerate() { - // check that proof is non-redundant. 
The proof still can be valid, but - // we do not want peer to spam us with redundant data - if proof_fragment_index != last_fragment_index { - let has_unknown_headers = !proof_fragment.unknown_headers.is_empty(); - let has_new_authorities = proof_fragment.authorities_proof.is_some(); - if has_unknown_headers || !has_new_authorities { - return Err(ClientError::BadJustification("redundant proof of finality".into())); - } - } - - authorities = check_finality_proof_fragment::<_, _, J>( - blockchain, - authorities, - authorities_provider, - proof_fragment)?; - } + let mut result = (current_set_id, current_authorities, NumberFor::::zero()); - let effects = authorities.extract_effects().expect("at least one loop iteration is guaranteed - because proof is not empty;\ - check_finality_proof_fragment is called on every iteration;\ - check_finality_proof_fragment always returns FinalityEffects;\ - qed"); + for (ix, fragment) in proof.into_iter().enumerate() { + let is_last = ix == last; + result = check_warp_sync_proof_fragment::( + result.0, + &result.1, + &result.2, + is_last, + &fragment, + )?; - telemetry!(CONSENSUS_INFO; "afg.finality_proof_ok"; - "set_id" => ?effects.new_set_id, "finalized_header_hash" => ?effects.block); + if is_last { + return Ok((fragment.header, result.0, result.1)) + } + } - Ok(effects) + // empty proof can't prove anything + return Err(ClientError::BadJustification("empty proof of authority".into())); } -/// Check finality proof for the single block. -fn check_finality_proof_fragment( - blockchain: &B, - authority_set: AuthoritiesOrEffects, - authorities_provider: &dyn AuthoritySetForFinalityChecker, - proof_fragment: FinalityProofFragment, -) -> ClientResult> - where +/// Check finality authority set sequence. +fn check_warp_sync_proof_fragment( + current_set_id: u64, + current_authorities: &AuthorityList, + previous_checked_block: &NumberFor, + is_last: bool, + authorities_proof: &AuthoritySetProofFragment, +) -> ClientResult<(u64, AuthorityList, NumberFor)> +where NumberFor: BlockNumberOps, - B: BlockchainBackend, - J: Decode + ProvableJustification, + J: Decode + ProvableJustification + BlockJustification, { - // verify justification using previous authorities set - let (mut current_set_id, mut current_authorities) = authority_set.extract_authorities(); - let justification: J = Decode::decode(&mut &proof_fragment.justification[..]) + let justification: J = Decode::decode(&mut authorities_proof.justification.as_slice()) .map_err(|_| ClientError::JustificationDecode)?; - justification.verify(current_set_id, ¤t_authorities)?; + justification.verify(current_set_id, ¤t_authorities)?; - // and now verify new authorities proof (if provided) - if let Some(new_authorities_proof) = proof_fragment.authorities_proof { - // the proof is either generated using known header and it is safe to query header - // here, because its non-finality proves that it can't be pruned - // or it is generated using last unknown header (because it is the one who has - // justification => we only generate proofs for headers with justifications) - let header = match proof_fragment.unknown_headers.iter().rev().next().cloned() { - Some(header) => header, - None => blockchain.expect_header(BlockId::Hash(proof_fragment.block))?, - }; - current_authorities = authorities_provider.check_authorities_proof( - proof_fragment.block, - header, - new_authorities_proof, - )?; + // assert justification is for this header + if &justification.number() != authorities_proof.header.number() + || 
justification.hash().as_ref() != authorities_proof.header.hash().as_ref() { + return Err(ClientError::Backend("Invalid authority warp proof, justification do not match header".to_string())); + } - current_set_id += 1; - } + if authorities_proof.header.number() <= previous_checked_block { + return Err(ClientError::Backend("Invalid authority warp proof".to_string())); + } + let current_block = authorities_proof.header.number(); + let mut at_block = None; + if let Some(sp_finality_grandpa::ScheduledChange { + next_authorities, + delay, + }) = crate::import::find_scheduled_change::(&authorities_proof.header) { + let dest = *current_block + delay; + at_block = Some((dest, next_authorities)); + } + if let Some((block_number, sp_finality_grandpa::ScheduledChange { + next_authorities, + delay, + })) = crate::import::find_forced_change::(&authorities_proof.header) { + let dest = block_number + delay; + at_block = Some((dest, next_authorities)); + } - Ok(AuthoritiesOrEffects::Effects(FinalityEffects { - headers_to_import: proof_fragment.unknown_headers, - block: proof_fragment.block, - justification: proof_fragment.justification, - new_set_id: current_set_id, - new_authorities: current_authorities, - })) + // Fragment without change only allowed for proof last block. + if at_block.is_none() && !is_last { + return Err(ClientError::Backend("Invalid authority warp proof".to_string())); + } + if let Some((at_block, next_authorities)) = at_block { + Ok((current_set_id + 1, next_authorities, at_block)) + } else { + Ok((current_set_id, current_authorities.clone(), current_block.clone())) + } } -/// Authorities set from initial authorities set or finality effects. -enum AuthoritiesOrEffects { - Authorities(u64, AuthorityList), - Effects(FinalityEffects
), +/// Block info extracted from the justification. +pub(crate) trait BlockJustification { + /// Block number justified. + fn number(&self) -> Header::Number; + + /// Block hash justified. + fn hash(&self) -> Header::Hash; } -impl AuthoritiesOrEffects
{ - pub fn extract_authorities(self) -> (u64, AuthorityList) { - match self { - AuthoritiesOrEffects::Authorities(set_id, authorities) => (set_id, authorities), - AuthoritiesOrEffects::Effects(effects) => (effects.new_set_id, effects.new_authorities), - } - } +/// Check GRANDPA proof-of-finality for the given block. +/// +/// Returns the vector of headers that MUST be validated + imported +/// AND if at least one of those headers is invalid, all other MUST be considered invalid. +/// +/// This is currently not used, and exists primarily as an example of how to check finality proofs. +#[cfg(test)] +fn check_finality_proof( + current_set_id: u64, + current_authorities: AuthorityList, + remote_proof: Vec, +) -> ClientResult> +where + J: ProvableJustification
, +{ + let proof = FinalityProof::
::decode(&mut &remote_proof[..]) + .map_err(|_| ClientError::BadJustification("failed to decode finality proof".into()))?; - pub fn extract_effects(self) -> Option> { - match self { - AuthoritiesOrEffects::Authorities(_, _) => None, - AuthoritiesOrEffects::Effects(effects) => Some(effects), - } - } + let justification: J = Decode::decode(&mut &proof.justification[..]) + .map_err(|_| ClientError::JustificationDecode)?; + justification.verify(current_set_id, ¤t_authorities)?; + + use sc_telemetry::{telemetry, CONSENSUS_INFO}; + telemetry!(CONSENSUS_INFO; "afg.finality_proof_ok"; + "finalized_header_hash" => ?proof.block); + Ok(proof) } /// Justification used to prove block finality. -pub(crate) trait ProvableJustification: Encode + Decode { +pub trait ProvableJustification: Encode + Decode { /// Verify justification with respect to authorities set and authorities set id. fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()>; @@ -595,16 +513,16 @@ pub(crate) trait ProvableJustification: Encode + Decode { set_id: u64, authorities: &[(AuthorityId, u64)], ) -> ClientResult { - let justification = Self::decode(&mut &**justification) - .map_err(|_| ClientError::JustificationDecode)?; + let justification = + Self::decode(&mut &**justification).map_err(|_| ClientError::JustificationDecode)?; justification.verify(set_id, authorities)?; Ok(justification) } } impl ProvableJustification for GrandpaJustification - where - NumberFor: BlockNumberOps, +where + NumberFor: BlockNumberOps, { fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { let authorities = VoterSet::new(authorities.iter().cloned()).ok_or( @@ -615,45 +533,80 @@ impl ProvableJustification for GrandpaJustificatio } } -#[cfg(test)] -pub(crate) mod tests { - use substrate_test_runtime_client::runtime::{Block, Header, H256}; - use sc_client_api::NewBlockState; - use sc_client_api::in_mem::Blockchain as InMemoryBlockchain; - use super::*; - use sp_core::crypto::Public; +impl BlockJustification for GrandpaJustification { + fn number(&self) -> NumberFor { + self.commit.target_number.clone() + } + fn hash(&self) -> Block::Hash { + self.commit.target_hash.clone() + } +} - pub(crate) type FinalityProof = super::FinalityProof
; +/// Simple cache for warp sync queries. +pub struct WarpSyncFragmentCache { + header_has_proof_fragment: std::collections::HashMap, + cache: linked_hash_map::LinkedHashMap< + Header::Number, + (AuthoritySetProofFragment
, Header::Number), + >, + limit: usize, +} + +impl WarpSyncFragmentCache
{ + /// Instantiate a new cache for the warp sync prover. + pub fn new(size: usize) -> Self { + WarpSyncFragmentCache { + header_has_proof_fragment: Default::default(), + cache: Default::default(), + limit: size, + } + } + + fn new_item( + &mut self, + at: Header::Number, + item: Option<(AuthoritySetProofFragment
, Header::Number)>, + ) { + self.header_has_proof_fragment.insert(at, item.is_some()); + + if let Some(item) = item { + if self.cache.len() == self.limit { + self.pop_one(); + } - impl AuthoritySetForFinalityProver for (GetAuthorities, ProveAuthorities) - where - GetAuthorities: Send + Sync + Fn(BlockId) -> ClientResult, - ProveAuthorities: Send + Sync + Fn(BlockId) -> ClientResult, - { - fn authorities(&self, block: &BlockId) -> ClientResult { - self.0(*block) + self.cache.insert(at, item); } + } - fn prove_authorities(&self, block: &BlockId) -> ClientResult { - self.1(*block) + fn pop_one(&mut self) { + if let Some((header_number, _)) = self.cache.pop_front() { + self.header_has_proof_fragment.remove(&header_number); } } - pub(crate) struct ClosureAuthoritySetForFinalityChecker(pub Closure); - - impl AuthoritySetForFinalityChecker for ClosureAuthoritySetForFinalityChecker - where - Closure: Send + Sync + Fn(H256, Header, StorageProof) -> ClientResult, - { - fn check_authorities_proof( - &self, - hash: H256, - header: Header, - proof: StorageProof, - ) -> ClientResult { - self.0(hash, header, proof) + fn get_item( + &mut self, + block: Header::Number, + ) -> Option, Header::Number)>> { + match self.header_has_proof_fragment.get(&block) { + Some(true) => Some(self.cache.get_refresh(&block).cloned()), + Some(false) => Some(None), + None => None } } +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use crate::authorities::AuthoritySetChanges; + use sp_core::crypto::Public; + use sp_finality_grandpa::AuthorityList; + use sc_client_api::NewBlockState; + use sc_client_api::in_mem::Blockchain as InMemoryBlockchain; + use substrate_test_runtime_client::runtime::{Block, Header, H256}; + + pub(crate) type FinalityProof = super::FinalityProof
; #[derive(Debug, PartialEq, Encode, Decode)] pub struct TestJustification(pub (u64, AuthorityList), pub Vec); @@ -668,396 +621,366 @@ pub(crate) mod tests { } } + #[derive(Debug, PartialEq, Encode, Decode)] + pub struct TestBlockJustification(TestJustification, u64, H256); + + impl BlockJustification
for TestBlockJustification { + fn number(&self) ->
::Number { + self.1 + } + fn hash(&self) ->
::Hash { + self.2.clone() + } + } + + impl ProvableJustification
for TestBlockJustification { + fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { + self.0.verify(set_id, authorities) + } + } + fn header(number: u64) -> Header { let parent_hash = match number { 0 => Default::default(), _ => header(number - 1).hash(), }; - Header::new(number, H256::from_low_u64_be(0), H256::from_low_u64_be(0), parent_hash, Default::default()) - } - - fn side_header(number: u64) -> Header { Header::new( number, H256::from_low_u64_be(0), - H256::from_low_u64_be(1), - header(number - 1).hash(), - Default::default(), - ) - } - - fn second_side_header(number: u64) -> Header { - Header::new( - number, H256::from_low_u64_be(0), - H256::from_low_u64_be(1), - side_header(number - 1).hash(), + parent_hash, Default::default(), ) } fn test_blockchain() -> InMemoryBlockchain { let blockchain = InMemoryBlockchain::::new(); - blockchain.insert(header(0).hash(), header(0), Some(vec![0]), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(1).hash(), header(1), Some(vec![1]), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(2).hash(), header(2), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(header(3).hash(), header(3), Some(vec![3]), None, NewBlockState::Final).unwrap(); + blockchain + .insert(header(0).hash(), header(0), Some(vec![0]), None, NewBlockState::Final) + .unwrap(); + blockchain + .insert(header(1).hash(), header(1), Some(vec![1]), None, NewBlockState::Final) + .unwrap(); + blockchain + .insert(header(2).hash(), header(2), None, None, NewBlockState::Best) + .unwrap(); + blockchain + .insert(header(3).hash(), header(3), Some(vec![3]), None, NewBlockState::Final) + .unwrap(); blockchain } #[test] - fn finality_prove_fails_with_invalid_range() { - let blockchain = test_blockchain(); - - // their last finalized is: 2 - // they request for proof-of-finality of: 2 - // => range is invalid - prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| unreachable!("should return before calling GetAuthorities"), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(2).hash(), - header(2).hash(), - ).unwrap_err(); - } - - #[test] - fn finality_proof_is_none_if_no_more_last_finalized_blocks() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); - - // our last finalized is: 3 - // their last finalized is: 3 - // => we can't provide any additional justifications - let proof_of_4 = prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| unreachable!("should return before calling GetAuthorities"), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(3).hash(), - header(4).hash(), - ).unwrap(); - assert_eq!(proof_of_4, None); - } - - #[test] - fn finality_proof_fails_for_non_canonical_block() { + fn finality_proof_fails_if_no_more_last_finalized_blocks() { let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(side_header(4).hash(), side_header(4), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(second_side_header(5).hash(), second_side_header(5), None, None, NewBlockState::Best) + blockchain + .insert(header(4).hash(), header(4), Some(vec![1]), None, NewBlockState::Best) + .unwrap(); + blockchain + .insert(header(5).hash(), header(5), Some(vec![2]), None, NewBlockState::Best) .unwrap(); - blockchain.insert(header(5).hash(), header(5), 
Some(vec![5]), None, NewBlockState::Final).unwrap(); - - // chain is 1 -> 2 -> 3 -> 4 -> 5 - // \> 4' -> 5' - // and the best finalized is 5 - // => when requesting for (4'; 5'], error is returned - prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| unreachable!("should return before calling GetAuthorities"), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - side_header(4).hash(), - second_side_header(5).hash(), - ).unwrap_err(); - } - #[test] - fn finality_proof_is_none_if_no_justification_known() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Final).unwrap(); + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 5); - // block 4 is finalized without justification - // => we can't prove finality + // The last finalized block is 3, so we cannot provide further justifications. let proof_of_4 = prove_finality::<_, _, TestJustification>( &blockchain, - &( - |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), - |_| unreachable!("authorities didn't change => ProveAuthorities won't be called"), - ), - 0, - header(3).hash(), - header(4).hash(), - ).unwrap(); - assert_eq!(proof_of_4, None); - } - - #[test] - fn finality_proof_works_without_authorities_change() { - let blockchain = test_blockchain(); - let authorities = vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]; - let just4 = TestJustification((0, authorities.clone()), vec![4]).encode(); - let just5 = TestJustification((0, authorities.clone()), vec![5]).encode(); - blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), Some(just5.clone()), None, NewBlockState::Final).unwrap(); - - // blocks 4 && 5 are finalized with justification - // => since authorities are the same, we only need justification for 5 - let proof_of_5: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| Ok(authorities.clone()), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(3).hash(), - header(5).hash(), - ).unwrap().unwrap()[..]).unwrap(); - assert_eq!(proof_of_5, vec![FinalityProofFragment { - block: header(5).hash(), - justification: just5, - unknown_headers: Vec::new(), - authorities_proof: None, - }]); + authority_set_changes, + *header(4).number(), + ); + assert!(matches!(proof_of_4, Err(FinalityProofError::BlockNotYetFinalized))); } #[test] - fn finality_proof_finalized_earlier_block_if_no_justification_for_target_is_known() { + fn finality_proof_is_none_if_no_justification_known() { let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), Some(vec![4]), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), None, None, NewBlockState::Final).unwrap(); - - // block 4 is finalized with justification + we request for finality of 5 - // => we can't prove finality of 5, but providing finality for 4 is still useful for requester - let proof_of_5: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(3).hash(), - header(5).hash(), - ).unwrap().unwrap()[..]).unwrap(); - assert_eq!(proof_of_5, vec![FinalityProofFragment { - block: header(4).hash(), - justification: vec![4], - 
unknown_headers: Vec::new(), - authorities_proof: None, - }]); - } + blockchain + .insert(header(4).hash(), header(4), None, None, NewBlockState::Final) + .unwrap(); - #[test] - fn finality_proof_works_with_authorities_change() { - let blockchain = test_blockchain(); - let auth3 = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - let auth5 = vec![(AuthorityId::from_slice(&[5u8; 32]), 1u64)]; - let auth7 = vec![(AuthorityId::from_slice(&[7u8; 32]), 1u64)]; - let just4 = TestJustification((0, auth3.clone()), vec![4]).encode(); - let just5 = TestJustification((0, auth3.clone()), vec![5]).encode(); - let just7 = TestJustification((1, auth5.clone()), vec![7]).encode(); - blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), Some(just5.clone()), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(6).hash(), header(6), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(7).hash(), header(7), Some(just7.clone()), None, NewBlockState::Final).unwrap(); - - // when querying for finality of 6, we assume that the #3 is the last block known to the requester - // => since we only have justification for #7, we provide #7 - let proof_of_6: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |block_id| match block_id { - BlockId::Hash(h) if h == header(3).hash() => Ok(auth3.clone()), - BlockId::Number(4) => Ok(auth3.clone()), - BlockId::Number(5) => Ok(auth5.clone()), - BlockId::Number(7) => Ok(auth7.clone()), - _ => unreachable!("no other authorities should be fetched: {:?}", block_id), - }, - |block_id| match block_id { - BlockId::Number(5) => Ok(StorageProof::new(vec![vec![50]])), - BlockId::Number(7) => Ok(StorageProof::new(vec![vec![70]])), - _ => unreachable!("no other authorities should be proved: {:?}", block_id), - }, - ), - 0, - header(3).hash(), - header(6).hash(), - ).unwrap().unwrap()[..]).unwrap(); - // initial authorities set (which start acting from #0) is [3; 32] - assert_eq!(proof_of_6, vec![ - // new authorities set starts acting from #5 => we do not provide fragment for #4 - // first fragment provides justification for #5 && authorities set that starts acting from #5 - FinalityProofFragment { - block: header(5).hash(), - justification: just5, - unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![vec![50]])), - }, - // last fragment provides justification for #7 && unknown#7 - FinalityProofFragment { - block: header(7).hash(), - justification: just7.clone(), - unknown_headers: vec![header(7)], - authorities_proof: Some(StorageProof::new(vec![vec![70]])), - }, - ]); + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 4); - // now let's verify finality proof - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(6).hash(), header(6), None, None, NewBlockState::Final).unwrap(); - let effects = check_finality_proof::<_, _, TestJustification>( + // Block 4 is finalized without justification + // => we can't prove finality of 3 + let proof_of_3 = prove_finality::<_, _, TestJustification>( &blockchain, - 0, - auth3, - &ClosureAuthoritySetForFinalityChecker( - |hash, _header, proof: StorageProof| match proof.clone().iter_nodes().next().map(|x| x[0]) { - 
Some(50) => Ok(auth5.clone()), - Some(70) => Ok(auth7.clone()), - _ => unreachable!("no other proofs should be checked: {}", hash), - } - ), - proof_of_6.encode(), - ).unwrap(); - - assert_eq!(effects, FinalityEffects { - headers_to_import: vec![header(7)], - block: header(7).hash(), - justification: TestJustification((1, auth5.clone()), vec![7]).encode(), - new_set_id: 2, - new_authorities: auth7, - }); + authority_set_changes, + *header(3).number(), + ) + .unwrap(); + assert_eq!(proof_of_3, None); } #[test] fn finality_proof_check_fails_when_proof_decode_fails() { - let blockchain = test_blockchain(); - - // when we can't decode proof from Vec - check_finality_proof::<_, _, TestJustification>( - &blockchain, + // When we can't decode proof from Vec + check_finality_proof::<_, TestJustification>( 1, vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], - &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), vec![42], - ).unwrap_err(); + ) + .unwrap_err(); } #[test] fn finality_proof_check_fails_when_proof_is_empty() { - let blockchain = test_blockchain(); - - // when decoded proof has zero length - check_finality_proof::<_, _, TestJustification>( - &blockchain, + // When decoded proof has zero length + check_finality_proof::<_, TestJustification>( 1, vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], - &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), Vec::::new().encode(), - ).unwrap_err(); + ) + .unwrap_err(); } #[test] - fn finality_proof_check_fails_when_intermediate_fragment_has_unknown_headers() { - let blockchain = test_blockchain(); - - // when intermediate (#0) fragment has non-empty unknown headers - let authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - check_finality_proof::<_, _, TestJustification>( - &blockchain, + fn finality_proof_check_works() { + let auth = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; + let finality_proof = FinalityProof { + block: header(2).hash(), + justification: TestJustification((1, auth.clone()), vec![7]).encode(), + unknown_headers: Vec::new(), + }; + let proof = check_finality_proof::<_, TestJustification>( 1, - authorities.clone(), - &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), - vec![FinalityProofFragment { - block: header(4).hash(), - justification: TestJustification((0, authorities.clone()), vec![7]).encode(), - unknown_headers: vec![header(4)], - authorities_proof: Some(StorageProof::new(vec![vec![42]])), - }, FinalityProofFragment { - block: header(5).hash(), - justification: TestJustification((0, authorities), vec![8]).encode(), - unknown_headers: vec![header(5)], - authorities_proof: None, - }].encode(), - ).unwrap_err(); + auth.clone(), + finality_proof.encode(), + ) + .unwrap(); + assert_eq!(proof, finality_proof); } #[test] - fn finality_proof_check_fails_when_intermediate_fragment_has_no_authorities_proof() { + fn finality_proof_using_authority_set_changes_fails_with_undefined_start() { let blockchain = test_blockchain(); + let auth = vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]; + let just4 = TestJustification((0, auth.clone()), vec![4]).encode(); + let just7 = TestJustification((1, auth.clone()), vec![7]).encode(); + blockchain + .insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final) + .unwrap(); + blockchain + .insert(header(5).hash(), header(5), None, None, NewBlockState::Final) + .unwrap(); + blockchain + 
.insert(header(6).hash(), header(6), None, None, NewBlockState::Final) + .unwrap(); + blockchain + .insert(header(7).hash(), header(7), Some(just7.clone()), None, NewBlockState::Final) + .unwrap(); + + // We have stored the correct block number for the relevant set, but as we are missing the + // block for the preceding set the start is not well-defined. + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(1, 7); - // when intermediate (#0) fragment has empty authorities proof - let authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - check_finality_proof::<_, _, TestJustification>( + let proof_of_5 = prove_finality::<_, _, TestJustification>( &blockchain, - 1, - authorities.clone(), - &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), - vec![FinalityProofFragment { - block: header(4).hash(), - justification: TestJustification((0, authorities.clone()), vec![7]).encode(), - unknown_headers: Vec::new(), - authorities_proof: None, - }, FinalityProofFragment { - block: header(5).hash(), - justification: TestJustification((0, authorities), vec![8]).encode(), - unknown_headers: vec![header(5)], - authorities_proof: None, - }].encode(), - ).unwrap_err(); + authority_set_changes, + *header(5).number(), + ); + assert!(matches!(proof_of_5, Err(FinalityProofError::BlockNotInAuthoritySetChanges))); } #[test] - fn finality_proof_check_works() { + fn finality_proof_using_authority_set_changes_works() { let blockchain = test_blockchain(); + let auth = vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]; + let just4 = TestJustification((0, auth.clone()), vec![4]).encode(); + let just7 = TestJustification((1, auth.clone()), vec![7]).encode(); + blockchain + .insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final) + .unwrap(); + blockchain + .insert(header(5).hash(), header(5), None, None, NewBlockState::Final) + .unwrap(); + blockchain + .insert(header(6).hash(), header(6), None, None, NewBlockState::Final) + .unwrap(); + blockchain + .insert(header(7).hash(), header(7), Some(just7.clone()), None, NewBlockState::Final) + .unwrap(); - let initial_authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - let next_authorities = vec![(AuthorityId::from_slice(&[4u8; 32]), 1u64)]; - let effects = check_finality_proof::<_, _, TestJustification>( - &blockchain, - 1, - initial_authorities.clone(), - &ClosureAuthoritySetForFinalityChecker(|_, _, _| Ok(next_authorities.clone())), - vec![FinalityProofFragment { - block: header(2).hash(), - justification: TestJustification((1, initial_authorities.clone()), vec![7]).encode(), - unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![vec![42]])), - }, FinalityProofFragment { - block: header(4).hash(), - justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), - unknown_headers: vec![header(4)], - authorities_proof: None, - }].encode(), - ).unwrap(); - assert_eq!(effects, FinalityEffects { - headers_to_import: vec![header(4)], - block: header(4).hash(), - justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), - new_set_id: 2, - new_authorities: vec![(AuthorityId::from_slice(&[4u8; 32]), 1u64)], - }); + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 4); + authority_set_changes.append(1, 7); + + let proof_of_5: FinalityProof = Decode::decode( + &mut &prove_finality::<_, _, TestJustification>( + &blockchain, + 
authority_set_changes, + *header(5).number(), + ) + .unwrap() + .unwrap()[..], + ) + .unwrap(); + assert_eq!( + proof_of_5, + FinalityProof { + block: header(7).hash(), + justification: just7, + unknown_headers: vec![header(6)], + } + ); } #[test] - fn finality_proof_is_none_if_first_justification_is_generated_by_unknown_set() { - // this is the case for forced change: set_id has been forcibly increased on full node - // and light node missed that - // => justification verification will fail on light node anyways, so we do not return - // finality proof at all - let blockchain = test_blockchain(); - let just4 = TestJustification((0, vec![(AuthorityId::from_slice(&[42u8; 32]), 1u64)]), vec![4]).encode(); - blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); + fn warp_sync_proof_encoding_decoding() { + fn test_blockchain( + nb_blocks: u64, + mut set_change: &[(u64, Vec)], + mut justifications: &[(u64, Vec)], + ) -> (InMemoryBlockchain, Vec) { + let blockchain = InMemoryBlockchain::::new(); + let mut hashes = Vec::::new(); + let mut set_id = 0; + for i in 0..nb_blocks { + let mut set_id_next = set_id; + let mut header = header(i); + set_change.first() + .map(|j| if i == j.0 { + set_change = &set_change[1..]; + let next_authorities: Vec<_> = j.1.iter().map(|i| (AuthorityId::from_slice(&[*i; 32]), 1u64)).collect(); + set_id_next += 1; + header.digest_mut().logs.push( + sp_runtime::generic::DigestItem::Consensus( + sp_finality_grandpa::GRANDPA_ENGINE_ID, + sp_finality_grandpa::ConsensusLog::ScheduledChange( + sp_finality_grandpa::ScheduledChange { delay: 0u64, next_authorities } + ).encode(), + )); + }); + + if let Some(parent) = hashes.last() { + header.set_parent_hash(parent.clone()); + } + let header_hash = header.hash(); + + let justification = justifications.first() + .and_then(|j| if i == j.0 { + justifications = &justifications[1..]; + + let authority = j.1.iter().map(|j| + (AuthorityId::from_slice(&[*j; 32]), 1u64) + ).collect(); + let justification = TestBlockJustification( + TestJustification((set_id, authority), vec![i as u8]), + i, + header_hash, + ); + Some(justification.encode()) + } else { + None + }); + hashes.push(header_hash.clone()); + set_id = set_id_next; + + blockchain.insert(header_hash, header, justification, None, NewBlockState::Final) + .unwrap(); + } + (blockchain, hashes) + } - let proof_of_4 = prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(3).hash(), - header(4).hash(), + let (blockchain, hashes) = test_blockchain( + 7, + vec![(3, vec![9])].as_slice(), + vec![ + (1, vec![1, 2, 3]), + (2, vec![1, 2, 3]), + (3, vec![1, 2, 3]), + (4, vec![9]), + (6, vec![9]), + ].as_slice(), + ); + + // proof after set change + let mut cache = WarpSyncFragmentCache::new(5); + let proof_no_cache = prove_warp_sync(&blockchain, hashes[6], None, Some(&mut cache)).unwrap(); + let proof = prove_warp_sync(&blockchain, hashes[6], None, Some(&mut cache)).unwrap(); + assert_eq!(proof_no_cache, proof); + + let initial_authorities: Vec<_> = [1u8, 2, 3].iter().map(|i| + (AuthorityId::from_slice(&[*i; 32]), 1u64) + ).collect(); + + let authorities_next: Vec<_> = [9u8].iter().map(|i| + (AuthorityId::from_slice(&[*i; 32]), 1u64) + ).collect(); + + assert!(check_warp_sync_proof::( + 0, + initial_authorities.clone(), + proof.clone(), + ).is_err()); + assert!(check_warp_sync_proof::( + 0, + 
authorities_next.clone(), + proof.clone(), + ).is_err()); + assert!(check_warp_sync_proof::( + 1, + initial_authorities.clone(), + proof.clone(), + ).is_err()); + let ( + _header, + current_set_id, + current_set, + ) = check_warp_sync_proof::( + 1, + authorities_next.clone(), + proof.clone(), ).unwrap(); - assert!(proof_of_4.is_none()); + + assert_eq!(current_set_id, 1); + assert_eq!(current_set, authorities_next); + + // proof before set change + let proof = prove_warp_sync(&blockchain, hashes[1], None, None).unwrap(); + let ( + _header, + current_set_id, + current_set, + ) = check_warp_sync_proof::( + 0, + initial_authorities.clone(), + proof.clone(), + ).unwrap(); + + assert_eq!(current_set_id, 1); + assert_eq!(current_set, authorities_next); + + // two changes + let (blockchain, hashes) = test_blockchain( + 13, + vec![(3, vec![7]), (8, vec![9])].as_slice(), + vec![ + (1, vec![1, 2, 3]), + (2, vec![1, 2, 3]), + (3, vec![1, 2, 3]), + (4, vec![7]), + (6, vec![7]), + (8, vec![7]), // warning, requires a justification on change set + (10, vec![9]), + ].as_slice(), + ); + + // proof before set change + let proof = prove_warp_sync(&blockchain, hashes[1], None, None).unwrap(); + let ( + _header, + current_set_id, + current_set, + ) = check_warp_sync_proof::( + 0, + initial_authorities.clone(), + proof.clone(), + ).unwrap(); + + assert_eq!(current_set_id, 2); + assert_eq!(current_set, authorities_next); } } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 04df95a3187e1..2eef13d583600 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
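Editor's note: the reworked tests above exercise the new AuthoritySetChanges-based proving flow end to end. A condensed sketch of that flow, assuming the same test helpers (test_blockchain, header, TestJustification) and the prove_finality / check_finality_proof call shapes exactly as they appear in these tests:

    // Sketch only; mirrors `finality_proof_using_authority_set_changes_works` above.
    let blockchain = test_blockchain();
    let auth = vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)];
    // ... insert blocks #4..#7 with justifications for #4 and #7, as in the test ...

    // Record which block closed each authority set: set 0 ended at #4, set 1 at #7.
    let mut authority_set_changes = AuthoritySetChanges::empty();
    authority_set_changes.append(0, 4);
    authority_set_changes.append(1, 7);

    // Prove finality of #5: the prover answers with the justification for #7 (the last
    // block of the set containing #5) plus any headers the requester may be missing (#6).
    let encoded = prove_finality::<_, _, TestJustification>(
        &blockchain,
        authority_set_changes,
        *header(5).number(),
    )
    .unwrap()   // no FinalityProofError
    .unwrap();  // a proof is available

    // The verifier only needs the current set id and authority list; on success it
    // returns the decoded FinalityProof (block #7, its justification, unknown headers).
    let proof = check_finality_proof::<_, TestJustification>(1, auth, encoded).unwrap();
    assert_eq!(proof.block, header(7).hash());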
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,7 +25,7 @@ use parking_lot::RwLockWriteGuard; use sp_blockchain::{BlockStatus, well_known_cache_keys}; use sc_client_api::{backend::Backend, utils::is_descendent_of}; use sp_utils::mpsc::TracingUnboundedSender; -use sp_api::{TransactionFor}; +use sp_api::TransactionFor; use sp_consensus::{ BlockImport, Error as ConsensusError, @@ -41,7 +41,6 @@ use sp_runtime::traits::{ use crate::{Error, CommandOrError, NewAuthoritySet, VoterCommand}; use crate::authorities::{AuthoritySet, SharedAuthoritySet, DelayKind, PendingChange}; -use crate::consensus_changes::SharedConsensusChanges; use crate::environment::finalize_block; use crate::justification::GrandpaJustification; use crate::notification::GrandpaJustificationSender; @@ -61,7 +60,6 @@ pub struct GrandpaBlockImport { select_chain: SC, authority_set: SharedAuthoritySet>, send_voter_commands: TracingUnboundedSender>>, - consensus_changes: SharedConsensusChanges>, authority_set_hard_forks: HashMap>>, justification_sender: GrandpaJustificationSender, _phantom: PhantomData, @@ -76,7 +74,6 @@ impl Clone for select_chain: self.select_chain.clone(), authority_set: self.authority_set.clone(), send_voter_commands: self.send_voter_commands.clone(), - consensus_changes: self.consensus_changes.clone(), authority_set_hard_forks: self.authority_set_hard_forks.clone(), justification_sender: self.justification_sender.clone(), _phantom: PhantomData, @@ -185,7 +182,7 @@ impl<'a, Block: 'a + BlockT> Drop for PendingSetChanges<'a, Block> { } } -fn find_scheduled_change(header: &B::Header) +pub(crate) fn find_scheduled_change(header: &B::Header) -> Option>> { let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); @@ -200,7 +197,7 @@ fn find_scheduled_change(header: &B::Header) header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) } -fn find_forced_change(header: &B::Header) +pub(crate) fn find_forced_change(header: &B::Header) -> Option<(NumberFor, ScheduledChange>)> { let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); @@ -439,7 +436,6 @@ impl BlockImport // we don't want to finalize on `inner.import_block` let mut justification = block.justification.take(); - let enacts_consensus_change = !new_cache.is_empty(); let import_result = (&*self.inner).import_block(block, new_cache); let mut imported_aux = { @@ -517,7 +513,7 @@ impl BlockImport ); import_res.unwrap_or_else(|err| { - if needs_justification || enacts_consensus_change { + if needs_justification { debug!(target: "afg", "Imported block #{} that enacts authority set change with \ invalid justification: {:?}, requesting justification from peers.", number, err); imported_aux.bad_justification = true; @@ -535,12 +531,6 @@ impl BlockImport imported_aux.needs_justification = true; } - - // we have imported block with consensus data changes, but without justification - // => remember to create justification when next block will be finalized - if enacts_consensus_change { - self.consensus_changes.lock().note_change((number, hash)); - } } } @@ -561,7 +551,6 @@ impl GrandpaBlockImport>, send_voter_commands: TracingUnboundedSender>>, - consensus_changes: SharedConsensusChanges>, authority_set_hard_forks: Vec<(SetId, PendingChange>)>, justification_sender: GrandpaJustificationSender, ) -> GrandpaBlockImport { @@ -605,7 +594,6 @@ impl GrandpaBlockImport BlockStatus for Arc where /// A trait that includes all the client functionalities grandpa 
requires. /// Ideally this would be a trait alias, we're not there yet. -/// tracking issue https://github.com/rust-lang/rust/issues/41517 +/// tracking issue pub trait ClientForGrandpa: LockImportRun + Finalizer + AuxStore + HeaderMetadata + HeaderBackend @@ -588,7 +586,6 @@ where select_chain.clone(), persistent_data.authority_set.clone(), voter_commands_tx, - persistent_data.consensus_changes.clone(), authority_set_hard_forks, justification_sender.clone(), ), @@ -656,6 +653,10 @@ pub struct GrandpaParams { /// A link to the block import worker. pub link: LinkHalf, /// The Network instance. + /// + /// It is assumed that this network will feed us Grandpa notifications. When using the + /// `sc_network` crate, it is assumed that the Grandpa notifications protocol has been passed + /// to the configuration of the networking. See [`grandpa_peers_set_config`]. pub network: N, /// If supplied, can be used to hook on telemetry connection established events. pub telemetry_on_connect: Option>, @@ -667,6 +668,22 @@ pub struct GrandpaParams { pub shared_voter_state: SharedVoterState, } +/// Returns the configuration value to put in +/// [`sc_network::config::NetworkConfiguration::extra_sets`]. +pub fn grandpa_peers_set_config() -> sc_network::config::NonDefaultSetConfig { + sc_network::config::NonDefaultSetConfig { + notifications_protocol: communication::GRANDPA_PROTOCOL_NAME.into(), + // Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot. + max_notification_size: 1024 * 1024, + set_config: sc_network::config::SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: sc_network::config::NonReservedPeerMode::Deny, + }, + } +} + /// Run a GRANDPA voter as a task. Provide configuration and a link to a /// block import worker that has already been instantiated with `block_import`. pub fn run_grandpa_voter( @@ -844,7 +861,6 @@ where network: network.clone(), set_id: persistent_data.authority_set.set_id(), authority_set: persistent_data.authority_set.clone(), - consensus_changes: persistent_data.consensus_changes.clone(), voter_set_state: persistent_data.set_state, metrics: metrics.as_ref().map(|m| m.environment.clone()), justification_sender: Some(justification_sender), @@ -989,7 +1005,6 @@ where select_chain: self.env.select_chain.clone(), config: self.env.config.clone(), authority_set: self.env.authority_set.clone(), - consensus_changes: self.env.consensus_changes.clone(), network: self.env.network.clone(), voting_rule: self.env.voting_rule.clone(), metrics: self.env.metrics.clone(), @@ -1071,27 +1086,6 @@ where } } -/// When GRANDPA is not initialized we still need to register the finality -/// tracker inherent provider which might be expected by the runtime for block -/// authoring. Additionally, we register a gossip message validator that -/// discards all GRANDPA messages (otherwise, we end up banning nodes that send -/// us a `Neighbor` message, since there is no registered gossip validator for -/// the engine id defined in the message.) -pub fn setup_disabled_grandpa(network: N) -> Result<(), sp_consensus::Error> -where - N: NetworkT + Send + Clone + 'static, -{ - // We register the GRANDPA protocol so that we don't consider it an anomaly - // to receive GRANDPA messages on the network. We don't process the - // messages. 
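Editor's note: the grandpa_peers_set_config doc comment above describes how the voter expects the networking to be configured. A minimal sketch of that wiring, assuming a NetworkConfiguration value named net_config is already in scope (for example taken from the node's service Configuration):

    // Register the GRANDPA notifications peer set before building the network.
    // `net_config` is an assumed `sc_network::config::NetworkConfiguration`;
    // `extra_sets` is the field named in the doc comment above.
    net_config.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());

With the set registered, the same network handle can then be passed as GrandpaParams::network, as its doc comment requires.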
- network.register_notifications_protocol( - communication::GRANDPA_ENGINE_ID, - From::from(communication::GRANDPA_PROTOCOL_NAME), - ); - - Ok(()) -} - /// Checks if this node has any available keys in the keystore for any authority id in the given /// voter set. Returns the authority id for which keys are available, or `None` if no keys are /// available. diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs deleted file mode 100644 index a7c9a655467c7..0000000000000 --- a/client/finality-grandpa/src/light_import.rs +++ /dev/null @@ -1,880 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use std::collections::HashMap; -use std::sync::Arc; -use log::{info, trace, warn}; -use parking_lot::RwLock; -use sc_client_api::backend::{AuxStore, Backend, Finalizer, TransactionFor}; -use sp_blockchain::{HeaderBackend, Error as ClientError, well_known_cache_keys}; -use parity_scale_codec::{Encode, Decode}; -use sp_consensus::{ - import_queue::Verifier, - BlockOrigin, BlockImport, FinalityProofImport, BlockImportParams, ImportResult, ImportedAux, - BlockCheckParams, Error as ConsensusError, -}; -use sc_network::config::{BoxFinalityProofRequestBuilder, FinalityProofRequestBuilder}; -use sp_runtime::Justification; -use sp_runtime::traits::{NumberFor, Block as BlockT, Header as HeaderT, DigestFor}; -use sp_finality_grandpa::{self, AuthorityList}; -use sp_runtime::generic::BlockId; - -use crate::GenesisAuthoritySetProvider; -use crate::aux_schema::load_decode; -use crate::consensus_changes::ConsensusChanges; -use crate::environment::canonical_at_height; -use crate::finality_proof::{ - AuthoritySetForFinalityChecker, ProvableJustification, make_finality_proof_request, -}; -use crate::justification::GrandpaJustification; - -/// LightAuthoritySet is saved under this key in aux storage. -const LIGHT_AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; -/// ConsensusChanges is saver under this key in aux storage. -const LIGHT_CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; - -/// Create light block importer. -pub fn light_block_import( - client: Arc, - backend: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, - authority_set_provider: Arc>, -) -> Result, ClientError> - where - BE: Backend, - Client: crate::ClientForGrandpa, -{ - let info = client.info(); - let import_data = load_aux_import_data( - info.finalized_hash, - &*client, - genesis_authorities_provider, - )?; - Ok(GrandpaLightBlockImport { - client, - backend, - authority_set_provider, - data: Arc::new(RwLock::new(import_data)), - }) -} - -/// A light block-import handler for GRANDPA. -/// -/// It is responsible for: -/// - checking GRANDPA justifications; -/// - fetching finality proofs for blocks that are enacting consensus changes. 
-pub struct GrandpaLightBlockImport { - client: Arc, - backend: Arc, - authority_set_provider: Arc>, - data: Arc>>, -} - -impl Clone for GrandpaLightBlockImport { - fn clone(&self) -> Self { - GrandpaLightBlockImport { - client: self.client.clone(), - backend: self.backend.clone(), - authority_set_provider: self.authority_set_provider.clone(), - data: self.data.clone(), - } - } -} - -/// Mutable data of light block importer. -struct LightImportData { - last_finalized: Block::Hash, - authority_set: LightAuthoritySet, - consensus_changes: ConsensusChanges>, -} - -/// Latest authority set tracker. -#[derive(Debug, Encode, Decode)] -struct LightAuthoritySet { - set_id: u64, - authorities: AuthorityList, -} - -impl GrandpaLightBlockImport { - /// Create finality proof request builder. - pub fn create_finality_proof_request_builder(&self) -> BoxFinalityProofRequestBuilder { - Box::new(GrandpaFinalityProofRequestBuilder(self.data.clone())) as _ - } -} - -impl BlockImport - for GrandpaLightBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for<'a> &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, -{ - type Error = ConsensusError; - type Transaction = TransactionFor; - - fn import_block( - &mut self, - block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - do_import_block::<_, _, _, GrandpaJustification>( - &*self.client, &mut *self.data.write(), block, new_cache - ) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.client.check_block(block) - } -} - -impl FinalityProofImport - for GrandpaLightBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for<'a> &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, -{ - type Error = ConsensusError; - - fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { - let mut out = Vec::new(); - let chain_info = (&*self.client).info(); - - let data = self.data.read(); - for (pending_number, pending_hash) in data.consensus_changes.pending_changes() { - if *pending_number > chain_info.finalized_number - && *pending_number <= chain_info.best_number - { - out.push((*pending_hash, *pending_number)); - } - } - - out - } - - fn import_finality_proof( - &mut self, - hash: Block::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(Block::Hash, NumberFor), Self::Error> { - do_import_finality_proof::<_, _, _, GrandpaJustification>( - &*self.client, - self.backend.clone(), - &*self.authority_set_provider, - &mut *self.data.write(), - hash, - number, - finality_proof, - verifier, - ) - } -} - -impl LightAuthoritySet { - /// Get a genesis set with given authorities. - pub fn genesis(initial: AuthorityList) -> Self { - LightAuthoritySet { - set_id: sp_finality_grandpa::SetId::default(), - authorities: initial, - } - } - - /// Get latest set id. - pub fn set_id(&self) -> u64 { - self.set_id - } - - /// Get latest authorities set. - pub fn authorities(&self) -> AuthorityList { - self.authorities.clone() - } - - /// Set new authorities set. 
- pub fn update(&mut self, set_id: u64, authorities: AuthorityList) { - self.set_id = set_id; - self.authorities = authorities; - } -} - -struct GrandpaFinalityProofRequestBuilder(Arc>>); - -impl FinalityProofRequestBuilder for GrandpaFinalityProofRequestBuilder { - fn build_request_data(&mut self, _hash: &B::Hash) -> Vec { - let data = self.0.read(); - make_finality_proof_request( - data.last_finalized, - data.authority_set.set_id(), - ) - } -} - -/// Try to import new block. -fn do_import_block( - mut client: C, - data: &mut LightImportData, - mut block: BlockImportParams>, - new_cache: HashMap>, -) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + BlockImport> - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - J: ProvableJustification, -{ - let hash = block.post_hash(); - let number = *block.header.number(); - - // we don't want to finalize on `inner.import_block` - let justification = block.justification.take(); - let enacts_consensus_change = !new_cache.is_empty(); - let import_result = client.import_block(block, new_cache); - - let mut imported_aux = match import_result { - Ok(ImportResult::Imported(aux)) => aux, - Ok(r) => return Ok(r), - Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), - }; - - match justification { - Some(justification) => { - trace!( - target: "afg", - "Imported block {}{}. Importing justification.", - if enacts_consensus_change { " which enacts consensus changes" } else { "" }, - hash, - ); - - do_import_justification::<_, _, _, J>(client, data, hash, number, justification) - }, - None if enacts_consensus_change => { - trace!( - target: "afg", - "Imported block {} which enacts consensus changes. Requesting finality proof.", - hash, - ); - - // remember that we need finality proof for this block - imported_aux.needs_finality_proof = true; - data.consensus_changes.note_change((number, hash)); - Ok(ImportResult::Imported(imported_aux)) - }, - None => Ok(ImportResult::Imported(imported_aux)), - } -} - -/// Try to import finality proof. 
-fn do_import_finality_proof( - client: C, - backend: Arc, - authority_set_provider: &dyn AuthoritySetForFinalityChecker, - data: &mut LightImportData, - _hash: Block::Hash, - _number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, -) -> Result<(Block::Hash, NumberFor), ConsensusError> - where - C: HeaderBackend - + AuxStore - + Finalizer - + BlockImport> - + Clone, - B: Backend + 'static, - DigestFor: Encode, - NumberFor: finality_grandpa::BlockNumberOps, - J: ProvableJustification, -{ - let authority_set_id = data.authority_set.set_id(); - let authorities = data.authority_set.authorities(); - let finality_effects = crate::finality_proof::check_finality_proof::<_, _, J>( - backend.blockchain(), - authority_set_id, - authorities, - authority_set_provider, - finality_proof, - ).map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - // try to import all new headers - let block_origin = BlockOrigin::NetworkBroadcast; - for header_to_import in finality_effects.headers_to_import { - let (block_to_import, new_authorities) = verifier.verify( - block_origin, - header_to_import, - None, - None, - ).map_err(|e| ConsensusError::ClientImport(e))?; - assert!( - block_to_import.justification.is_none(), - "We have passed None as justification to verifier.verify", - ); - - let mut cache = HashMap::new(); - if let Some(authorities) = new_authorities { - cache.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); - } - do_import_block::<_, _, _, J>( - client.clone(), - data, - block_to_import.convert_transaction(), - cache, - )?; - } - - // try to import latest justification - let finalized_block_hash = finality_effects.block; - let finalized_block_number = backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(finality_effects.block)) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - do_finalize_block( - client.clone(), - data, - finalized_block_hash, - finalized_block_number, - finality_effects.justification.encode(), - )?; - - // apply new authorities set - data.authority_set.update( - finality_effects.new_set_id, - finality_effects.new_authorities, - ); - - // store new authorities set - require_insert_aux( - &client, - LIGHT_AUTHORITY_SET_KEY, - &data.authority_set, - "authority set", - )?; - - Ok((finalized_block_hash, finalized_block_number)) -} - -/// Try to import justification. -fn do_import_justification( - client: C, - data: &mut LightImportData, - hash: Block::Hash, - number: NumberFor, - justification: Justification, -) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, - J: ProvableJustification, -{ - // with justification, we have two cases - // - // optimistic: the same GRANDPA authorities set has generated intermediate justification - // => justification is verified using current authorities set + we could proceed further - // - // pessimistic scenario: the GRANDPA authorities set has changed - // => we need to fetch new authorities set (i.e. 
finality proof) from remote node - - // first, try to behave optimistically - let authority_set_id = data.authority_set.set_id(); - let justification = J::decode_and_verify( - &justification, - authority_set_id, - &data.authority_set.authorities(), - ); - - // BadJustification error means that justification has been successfully decoded, but - // it isn't valid within current authority set - let justification = match justification { - Err(ClientError::BadJustification(_)) => { - trace!( - target: "afg", - "Justification for {} is not valid within current authorities set. Requesting finality proof.", - hash, - ); - - let mut imported_aux = ImportedAux::default(); - imported_aux.needs_finality_proof = true; - return Ok(ImportResult::Imported(imported_aux)); - }, - Err(e) => { - trace!( - target: "afg", - "Justification for {} is not valid. Bailing.", - hash, - ); - - return Err(ConsensusError::ClientImport(e.to_string())); - }, - Ok(justification) => { - trace!( - target: "afg", - "Justification for {} is valid. Finalizing the block.", - hash, - ); - - justification - }, - }; - - // finalize the block - do_finalize_block(client, data, hash, number, justification.encode()) -} - -/// Finalize the block. -fn do_finalize_block( - client: C, - data: &mut LightImportData, - hash: Block::Hash, - number: NumberFor, - justification: Justification, -) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, -{ - // finalize the block - client.finalize_block(BlockId::Hash(hash), Some(justification), true).map_err(|e| { - warn!(target: "afg", "Error applying finality to block {:?}: {:?}", (hash, number), e); - ConsensusError::ClientImport(e.to_string()) - })?; - - // forget obsoleted consensus changes - let consensus_finalization_res = data.consensus_changes - .finalize( - (number, hash), - |at_height| canonical_at_height(&client, (hash, number), true, at_height) - ); - match consensus_finalization_res { - Ok((true, _)) => require_insert_aux( - &client, - LIGHT_CONSENSUS_CHANGES_KEY, - &data.consensus_changes, - "consensus changes", - )?, - Ok(_) => (), - Err(error) => return Err(on_post_finalization_error(error, "consensus changes")), - } - - // update last finalized block reference - data.last_finalized = hash; - - // we just finalized this block, so if we were importing it, it is now the new best - Ok(ImportResult::imported(true)) -} - -/// Load light import aux data from the store. -fn load_aux_import_data( - last_finalized: Block::Hash, - aux_store: &B, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, -) -> Result, ClientError> - where - B: AuxStore, - Block: BlockT, -{ - let authority_set = match load_decode(aux_store, LIGHT_AUTHORITY_SET_KEY)? { - Some(authority_set) => authority_set, - None => { - info!(target: "afg", "Loading GRANDPA authorities \ - from genesis on what appears to be first startup."); - - // no authority set on disk: fetch authorities from genesis state - let genesis_authorities = genesis_authorities_provider.get()?; - - let authority_set = LightAuthoritySet::genesis(genesis_authorities); - let encoded = authority_set.encode(); - aux_store.insert_aux(&[(LIGHT_AUTHORITY_SET_KEY, &encoded[..])], &[])?; - - authority_set - }, - }; - - let consensus_changes = match load_decode(aux_store, LIGHT_CONSENSUS_CHANGES_KEY)? 
{ - Some(consensus_changes) => consensus_changes, - None => { - let consensus_changes = ConsensusChanges::>::empty(); - - let encoded = authority_set.encode(); - aux_store.insert_aux(&[(LIGHT_CONSENSUS_CHANGES_KEY, &encoded[..])], &[])?; - - consensus_changes - }, - }; - - Ok(LightImportData { - last_finalized, - authority_set, - consensus_changes, - }) -} - -/// Insert into aux store. If failed, return error && show inconsistency warning. -fn require_insert_aux( - store: &A, - key: &[u8], - value: &T, - value_type: &str, -) -> Result<(), ConsensusError> { - let encoded = value.encode(); - let update_res = store.insert_aux(&[(key, &encoded[..])], &[]); - if let Err(error) = update_res { - return Err(on_post_finalization_error(error, value_type)); - } - - Ok(()) -} - -/// Display inconsistency warning. -fn on_post_finalization_error(error: ClientError, value_type: &str) -> ConsensusError { - warn!(target: "afg", "Failed to write updated {} to disk. Bailing.", value_type); - warn!(target: "afg", "Node is in a potentially inconsistent state."); - ConsensusError::ClientImport(error.to_string()) -} - -#[cfg(test)] -pub mod tests { - use super::*; - use sp_consensus::{import_queue::CacheKeyId, ForkChoiceStrategy, BlockImport}; - use sp_finality_grandpa::AuthorityId; - use sp_core::{H256, crypto::Public}; - use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, StorageProof, BlockBackend}; - use substrate_test_runtime_client::runtime::{Block, Header}; - use crate::tests::TestApi; - use crate::finality_proof::{ - FinalityProofFragment, - tests::{TestJustification, ClosureAuthoritySetForFinalityChecker}, - }; - - struct OkVerifier; - - impl Verifier for OkVerifier { - fn verify( - &mut self, - origin: BlockOrigin, - header: Header, - _justification: Option, - _body: Option::Extrinsic>>, - ) -> Result<(BlockImportParams, Option)>>), String> { - Ok((BlockImportParams::new(origin, header), None)) - } - } - - pub struct NoJustificationsImport( - pub GrandpaLightBlockImport - ); - - impl Clone - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - { - fn clone(&self) -> Self { - NoJustificationsImport(self.0.clone()) - } - } - - impl BlockImport - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for <'a > &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, - GrandpaLightBlockImport: - BlockImport, Error = ConsensusError> - { - type Error = ConsensusError; - type Transaction = TransactionFor; - - fn import_block( - &mut self, - mut block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - block.justification.take(); - self.0.import_block(block, new_cache) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.0.check_block(block) - } - } - - impl FinalityProofImport - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - BE: Backend + 'static, - DigestFor: Encode, - for <'a > &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, - { - type Error = ConsensusError; - - fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { - self.0.on_start() - } - - fn import_finality_proof( - &mut self, - hash: Block::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(Block::Hash, NumberFor), Self::Error> { - self.0.import_finality_proof(hash, number, finality_proof, verifier) - } - } - - /// Creates light 
block import that ignores justifications that came outside of finality proofs. - pub fn light_block_import_without_justifications( - client: Arc, - backend: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, - authority_set_provider: Arc>, - ) -> Result, ClientError> - where - BE: Backend + 'static, - Client: crate::ClientForGrandpa, - { - light_block_import(client, backend, genesis_authorities_provider, authority_set_provider) - .map(NoJustificationsImport) - } - - fn import_block( - new_cache: HashMap>, - justification: Option, - ) -> ( - ImportResult, - substrate_test_runtime_client::client::Client, - Arc, - ) { - let (client, backend) = substrate_test_runtime_client::new_light(); - let mut import_data = LightImportData { - last_finalized: Default::default(), - authority_set: LightAuthoritySet::genesis(vec![(AuthorityId::from_slice(&[1; 32]), 1)]), - consensus_changes: ConsensusChanges::empty(), - }; - let mut block = BlockImportParams::new( - BlockOrigin::Own, - Header { - number: 1, - parent_hash: client.chain_info().best_hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }, - ); - block.justification = justification; - block.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - ( - do_import_block::<_, _, _, TestJustification>( - &client, - &mut import_data, - block, - new_cache, - ).unwrap(), - client, - backend, - ) - } - - #[test] - fn finality_proof_not_required_when_consensus_data_does_not_changes_and_no_justification_provided() { - assert_eq!(import_block(HashMap::new(), None).0, ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_not_required_when_consensus_data_does_not_changes_and_correct_justification_provided() { - let justification = TestJustification((0, vec![(AuthorityId::from_slice(&[1; 32]), 1)]), Vec::new()).encode(); - assert_eq!(import_block(HashMap::new(), Some(justification)).0, ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_required_when_consensus_data_changes_and_no_justification_provided() { - let mut cache = HashMap::new(); - cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId::from_slice(&[2; 32])].encode()); - assert_eq!(import_block(cache, None).0, ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: true, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_required_when_consensus_data_changes_and_incorrect_justification_provided() { - let justification = TestJustification((0, vec![]), Vec::new()).encode(); - let mut cache = HashMap::new(); - cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId::from_slice(&[2; 32])].encode()); - assert_eq!( - import_block(cache, Some(justification)).0, - ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: true, - is_new_best: false, - header_only: false, - }, - )); - } - - - #[test] - fn aux_data_updated_on_start() { - let aux_store = InMemoryAuxStore::::new(); - let api = 
TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); - - // when aux store is empty initially - assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_none()); - assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_none()); - - // it is updated on importer start - load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); - assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_some()); - assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_some()); - } - - #[test] - fn aux_data_loaded_on_restart() { - let aux_store = InMemoryAuxStore::::new(); - let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); - - // when aux store is non-empty initially - let mut consensus_changes = ConsensusChanges::::empty(); - consensus_changes.note_change((42, Default::default())); - aux_store.insert_aux( - &[ - ( - LIGHT_AUTHORITY_SET_KEY, - LightAuthoritySet::genesis( - vec![(AuthorityId::from_slice(&[42; 32]), 2)] - ).encode().as_slice(), - ), - ( - LIGHT_CONSENSUS_CHANGES_KEY, - consensus_changes.encode().as_slice(), - ), - ], - &[], - ).unwrap(); - - // importer uses it on start - let data = load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); - assert_eq!(data.authority_set.authorities(), vec![(AuthorityId::from_slice(&[42; 32]), 2)]); - assert_eq!(data.consensus_changes.pending_changes(), &[(42, Default::default())]); - } - - #[test] - fn authority_set_is_updated_on_finality_proof_import() { - let initial_set_id = 0; - let initial_set = vec![(AuthorityId::from_slice(&[1; 32]), 1)]; - let updated_set = vec![(AuthorityId::from_slice(&[2; 32]), 2)]; - let babe_set_signal = vec![AuthorityId::from_slice(&[42; 32])].encode(); - - // import block #1 without justification - let mut cache = HashMap::new(); - cache.insert(well_known_cache_keys::AUTHORITIES, babe_set_signal); - let (_, client, backend) = import_block(cache, None); - - // import finality proof for block #1 - let hash = client.block_hash(1).unwrap().unwrap(); - let mut verifier = OkVerifier; - let mut import_data = LightImportData { - last_finalized: Default::default(), - authority_set: LightAuthoritySet::genesis(initial_set.clone()), - consensus_changes: ConsensusChanges::empty(), - }; - - // import finality proof - do_import_finality_proof::<_, _, _, TestJustification>( - &client, - backend, - &ClosureAuthoritySetForFinalityChecker( - |_, _, _| Ok(updated_set.clone()) - ), - &mut import_data, - Default::default(), - Default::default(), - vec![ - FinalityProofFragment::
{ - block: hash, - justification: TestJustification( - (initial_set_id, initial_set.clone()), - Vec::new(), - ).encode(), - unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![])), - }, - ].encode(), - &mut verifier, - ).unwrap(); - - // verify that new authorities set has been saved to the aux storage - let data = load_aux_import_data(Default::default(), &client, &TestApi::new(initial_set)).unwrap(); - assert_eq!(data.authority_set.authorities(), updated_set); - } -} diff --git a/client/finality-grandpa/src/notification.rs b/client/finality-grandpa/src/notification.rs index 8415583051902..b545f0d8a637e 100644 --- a/client/finality-grandpa/src/notification.rs +++ b/client/finality-grandpa/src/notification.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index fd00b35c40a73..c9db917e1699a 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -39,7 +39,6 @@ use crate::{ }; use crate::authorities::SharedAuthoritySet; use crate::communication::{Network as NetworkT, NetworkBridge}; -use crate::consensus_changes::SharedConsensusChanges; use crate::notification::GrandpaJustificationSender; use sp_finality_grandpa::AuthorityId; use std::marker::{PhantomData, Unpin}; @@ -68,7 +67,6 @@ impl<'a, Block, Client> finality_grandpa::Chain> fn grandpa_observer( client: &Arc, authority_set: &SharedAuthoritySet>, - consensus_changes: &SharedConsensusChanges>, voters: &Arc>, justification_sender: &Option>, last_finalized_number: NumberFor, @@ -83,7 +81,6 @@ where Client: crate::ClientForGrandpa, { let authority_set = authority_set.clone(); - let consensus_changes = consensus_changes.clone(); let client = client.clone(); let voters = voters.clone(); let justification_sender = justification_sender.clone(); @@ -123,7 +120,6 @@ where match environment::finalize_block( client.clone(), &authority_set, - &consensus_changes, None, finalized_hash, finalized_number, @@ -293,7 +289,6 @@ where let observer = grandpa_observer( &self.client, &self.persistent_data.authority_set, - &self.persistent_data.consensus_changes, &voters, &self.justification_sender, last_finalized_number, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index cf1b2ef986277..b94981838138a 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,33 +25,27 @@ use sc_network_test::{ Block, BlockImportAdapter, Hash, PassThroughVerifier, Peer, PeersClient, PeersFullClient, TestClient, TestNetFactory, FullPeerConfig, }; -use sc_network::config::{ProtocolConfig, BoxFinalityProofRequestBuilder}; +use sc_network::config::ProtocolConfig; use parking_lot::{RwLock, Mutex}; use futures_timer::Delay; use tokio::runtime::{Runtime, Handle}; use sp_keyring::Ed25519Keyring; use sc_client_api::backend::TransactionFor; use sp_blockchain::Result; -use sp_api::{ApiRef, StorageProof, ProvideRuntimeApi}; +use sp_api::{ApiRef, ProvideRuntimeApi}; use substrate_test_runtime_client::runtime::BlockNumber; use sp_consensus::{ BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, ImportResult, BlockImport, - import_queue::{BoxJustificationImport, BoxFinalityProofImport}, + import_queue::BoxJustificationImport, }; use std::{collections::{HashMap, HashSet}, pin::Pin}; -use parity_scale_codec::Decode; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, HashFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_runtime::generic::{BlockId, DigestItem}; -use sp_core::{H256, crypto::Public}; +use sp_core::H256; use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; use sp_finality_grandpa::{GRANDPA_ENGINE_ID, AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof}; -use sp_state_machine::{InMemoryBackend, prove_read, read_proof_check}; use authorities::AuthoritySet; -use finality_proof::{ - FinalityProofProvider, AuthoritySetForFinalityProver, AuthoritySetForFinalityChecker, -}; -use consensus_changes::ConsensusChanges; use sc_block_builder::BlockBuilderProvider; use sc_consensus::LongestChain; use sc_keystore::LocalKeystore; @@ -99,9 +93,7 @@ impl TestNetFactory for GrandpaTestNet { fn add_full_peer(&mut self) { self.add_full_peer_with_config(FullPeerConfig { - notifications_protocols: vec![ - (communication::GRANDPA_ENGINE_ID, communication::GRANDPA_PROTOCOL_NAME.into()) - ], + notifications_protocols: vec![communication::GRANDPA_PROTOCOL_NAME.into()], ..Default::default() }) } @@ -119,8 +111,6 @@ impl TestNetFactory for GrandpaTestNet { -> ( BlockImportAdapter, Option>, - Option>, - Option>, PeerData, ) { @@ -135,45 +125,12 @@ impl TestNetFactory for GrandpaTestNet { ( BlockImportAdapter::new_full(import), Some(justification_import), - None, - None, Mutex::new(Some(link)), ) }, - PeersClient::Light(ref client, ref backend) => { - use crate::light_import::tests::light_block_import_without_justifications; - - let authorities_provider = Arc::new(self.test_config.clone()); - // forbid direct finalization using justification that came with the block - // => light clients will try to fetch finality proofs - let import = light_block_import_without_justifications( - client.clone(), - backend.clone(), - &self.test_config, - authorities_provider, - ).expect("Could not create block import for fresh peer."); - let finality_proof_req_builder = import.0.create_finality_proof_request_builder(); - let proof_import = Box::new(import.clone()); - ( - BlockImportAdapter::new_light(import), - None, - Some(proof_import), - Some(finality_proof_req_builder), - Mutex::new(None), - ) - }, - } - } - - fn make_finality_proof_provider( - &self, - client: PeersClient - ) -> Option>> { - match client { - PeersClient::Full(_, ref backend) => { - 
Some(Arc::new(FinalityProofProvider::new(backend.clone(), self.test_config.clone()))) + PeersClient::Light(..) => { + panic!("Light client is not used in tests."); }, - PeersClient::Light(_, _) => None, } } @@ -245,43 +202,6 @@ impl GenesisAuthoritySetProvider for TestApi { } } -impl AuthoritySetForFinalityProver for TestApi { - fn authorities(&self, _block: &BlockId) -> Result { - Ok(self.genesis_authorities.clone()) - } - - fn prove_authorities(&self, block: &BlockId) -> Result { - let authorities = self.authorities(block)?; - let backend = >>::from(vec![ - (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) - ]); - let proof = prove_read(backend, vec![b"authorities"]) - .expect("failure proving read from in-memory storage backend"); - Ok(proof) - } -} - -impl AuthoritySetForFinalityChecker for TestApi { - fn check_authorities_proof( - &self, - _hash: ::Hash, - header: ::Header, - proof: StorageProof, - ) -> Result { - let results = read_proof_check::, _>( - *header.state_root(), proof, vec![b"authorities"] - ) - .expect("failure checking read proof for authorities"); - let encoded = results.get(&b"authorities"[..]) - .expect("returned map must contain all proof keys") - .as_ref() - .expect("authorities in proof is None"); - let authorities = Decode::decode(&mut &encoded[..]) - .expect("failure decoding authorities read from proof"); - Ok(authorities) - } -} - const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500); fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { @@ -307,6 +227,52 @@ fn block_until_complete(future: impl Future + Unpin, net: &Arc impl Future { + let voters = stream::FuturesUnordered::new(); + + for (peer_id, key) in peers.iter().enumerate() { + let (keystore, _) = create_keystore(*key); + + let (net_service, link) = { + // temporary needed for some reason + let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + ( + net.peers[peer_id].network_service().clone(), + link, + ) + }; + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", peer_id)), + is_authority: true, + observer_enabled: true, + }, + link, + network: net_service, + telemetry_on_connect: None, + voting_rule: (), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + }; + let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); + + fn assert_send(_: &T) { } + assert_send(&voter); + + voters.push(voter); + } + + voters.for_each(|_| async move {}) +} + // run the voters to completion. provide a closure to be invoked after // the voters are spawned but before blocking on them. 
fn run_to_completion_with( @@ -326,22 +292,9 @@ fn run_to_completion_with( wait_for.push(f); }; - let mut keystore_paths = Vec::new(); - for (peer_id, key) in peers.iter().enumerate() { - let (keystore, keystore_path) = create_keystore(*key); - keystore_paths.push(keystore_path); - + for (peer_id, _) in peers.iter().enumerate() { let highest_finalized = highest_finalized.clone(); - let (client, net_service, link) = { - let net = net.lock(); - // temporary needed for some reason - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) - }; + let client = net.lock().peers[peer_id].client().clone(); wait_for.push( Box::pin( @@ -357,30 +310,6 @@ fn run_to_completion_with( .map(|_| ()) ) ); - - fn assert_send(_: &T) { } - - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(keystore), - name: Some(format!("peer#{}", peer_id)), - is_authority: true, - observer_enabled: true, - }, - link: link, - network: net_service, - telemetry_on_connect: None, - voting_rule: (), - prometheus_registry: None, - shared_voter_state: SharedVoterState::empty(), - }; - let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); - - assert_send(&voter); - - runtime.spawn(voter); } // wait for all finalized on each. @@ -426,6 +355,7 @@ fn finalize_3_voters_no_observers() { let voters = make_ids(peers); let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + runtime.spawn(initialize_grandpa(&mut net, peers)); net.peer(0).push_blocks(20, false); net.block_until_sync(); @@ -452,50 +382,18 @@ fn finalize_3_voters_1_full_observer() { let voters = make_ids(peers); let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); - net.peer(0).push_blocks(20, false); - net.block_until_sync(); + runtime.spawn(initialize_grandpa(&mut net, peers)); - let net = Arc::new(Mutex::new(net)); - let mut finality_notifications = Vec::new(); - - let all_peers = peers.iter() - .cloned() - .map(Some) - .chain(std::iter::once(None)); - - let mut keystore_paths = Vec::new(); - - let mut voters = Vec::new(); - - for (peer_id, local_key) in all_peers.enumerate() { - let (client, net_service, link) = { - let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) - }; - finality_notifications.push( - client.finality_notification_stream() - .take_while(|n| future::ready(n.header.number() < &20)) - .for_each(move |_| future::ready(())) - ); - - let keystore = if let Some(local_key) = local_key { - let (keystore, keystore_path) = create_keystore(local_key); - keystore_paths.push(keystore_path); - Some(keystore) - } else { - None - }; + runtime.spawn({ + let peer_id = 3; + let net_service = net.peers[peer_id].network_service().clone(); + let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, justification_period: 32, - keystore, + keystore: None, name: Some(format!("peer#{}", peer_id)), is_authority: true, observer_enabled: true, @@ -508,11 +406,21 @@ fn finalize_3_voters_1_full_observer() { shared_voter_state: SharedVoterState::empty(), }; - voters.push(run_grandpa_voter(grandpa_params).expect("all 
in order with client and network")); - } + run_grandpa_voter(grandpa_params).expect("all in order with client and network") + }); - for voter in voters { - runtime.spawn(voter); + net.peer(0).push_blocks(20, false); + + let net = Arc::new(Mutex::new(net)); + let mut finality_notifications = Vec::new(); + + for peer_id in 0..4 { + let client = net.lock().peers[peer_id].client().clone(); + finality_notifications.push( + client.finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &20)) + .for_each(move |_| future::ready(())) + ); } // wait for all finalized on each. @@ -545,6 +453,13 @@ fn transition_3_voters_twice_1_full_observer() { let observer = &[Ed25519Keyring::One]; + let all_peers = peers_a.iter() + .chain(peers_b) + .chain(peers_c) + .chain(observer) + .cloned() + .collect::>(); // deduplicate + let genesis_voters = make_ids(peers_a); let api = TestApi::new(genesis_voters); @@ -552,6 +467,41 @@ fn transition_3_voters_twice_1_full_observer() { let mut runtime = Runtime::new().unwrap(); + let mut keystore_paths = Vec::new(); + let mut voters = Vec::new(); + for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() { + let (keystore, keystore_path) = create_keystore(local_key); + keystore_paths.push(keystore_path); + + let (net_service, link) = { + let net = net.lock(); + let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + ( + net.peers[peer_id].network_service().clone(), + link, + ) + }; + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", peer_id)), + is_authority: true, + observer_enabled: true, + }, + link, + network: net_service, + telemetry_on_connect: None, + voting_rule: (), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + }; + + voters.push(run_grandpa_voter(grandpa_params).expect("all in order with client and network")); + } + net.lock().peer(0).push_blocks(1, false); net.lock().block_until_sync(); @@ -617,30 +567,13 @@ fn transition_3_voters_twice_1_full_observer() { } let mut finality_notifications = Vec::new(); - let all_peers = peers_a.iter() - .chain(peers_b) - .chain(peers_c) - .chain(observer) - .cloned() - .collect::>() // deduplicate - .into_iter() - .enumerate(); - - let mut keystore_paths = Vec::new(); - for (peer_id, local_key) in all_peers { - let (keystore, keystore_path) = create_keystore(local_key); - keystore_paths.push(keystore_path); - let (client, net_service, link) = { - let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) - }; + for voter in voters { + runtime.spawn(voter); + } + for (peer_id, _) in all_peers.into_iter().enumerate() { + let client = net.lock().peers[peer_id].client().clone(); finality_notifications.push( client.finality_notification_stream() .take_while(|n| future::ready(n.header.number() < &30)) @@ -653,26 +586,6 @@ fn transition_3_voters_twice_1_full_observer() { assert_eq!(set.pending_changes().count(), 0); }) ); - - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(keystore), - name: Some(format!("peer#{}", peer_id)), - is_authority: true, - observer_enabled: true, - }, - link: link, - network: net_service, - telemetry_on_connect: 
None, - voting_rule: (), - prometheus_registry: None, - shared_voter_state: SharedVoterState::empty(), - }; - let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); - - runtime.spawn(voter); } // wait for all finalized on each. @@ -681,24 +594,6 @@ fn transition_3_voters_twice_1_full_observer() { block_until_complete(wait_for, &net, &mut runtime); } -#[test] -fn justification_is_emitted_when_consensus_data_changes() { - let mut runtime = Runtime::new().unwrap(); - let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); - - // import block#1 WITH consensus data change - let new_authorities = vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]; - net.peer(0).push_authorities_change_block(new_authorities); - net.block_until_sync(); - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 1, net.clone(), peers); - - // ... and check that there's justification for block#1 - assert!(net.lock().peer(0).client().justification(&BlockId::Number(1)).unwrap().is_some(), - "Missing justification for block#1"); -} - #[test] fn justification_is_generated_periodically() { let mut runtime = Runtime::new().unwrap(); @@ -706,6 +601,7 @@ fn justification_is_generated_periodically() { let voters = make_ids(peers); let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + runtime.spawn(initialize_grandpa(&mut net, peers)); net.peer(0).push_blocks(32, false); net.block_until_sync(); @@ -719,25 +615,6 @@ fn justification_is_generated_periodically() { } } -#[test] -fn consensus_changes_works() { - let mut changes = ConsensusChanges::::empty(); - - // pending changes are not finalized - changes.note_change((10, H256::from_low_u64_be(1))); - assert_eq!(changes.finalize((5, H256::from_low_u64_be(5)), |_| Ok(None)).unwrap(), (false, false)); - - // no change is selected from competing pending changes - changes.note_change((1, H256::from_low_u64_be(1))); - changes.note_change((1, H256::from_low_u64_be(101))); - assert_eq!(changes.finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some(H256::from_low_u64_be(1001)))).unwrap(), (true, false)); - - // change is selected from competing pending changes - changes.note_change((1, H256::from_low_u64_be(1))); - changes.note_change((1, H256::from_low_u64_be(101))); - assert_eq!(changes.finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some(H256::from_low_u64_be(1)))).unwrap(), (true, true)); -} - #[test] fn sync_justifications_on_change_blocks() { let mut runtime = Runtime::new().unwrap(); @@ -748,6 +625,7 @@ fn sync_justifications_on_change_blocks() { // 4 peers, 3 of them are authorities and participate in grandpa let api = TestApi::new(voters); let mut net = GrandpaTestNet::new(api, 4); + let voters = initialize_grandpa(&mut net, peers_a); // add 20 blocks net.peer(0).push_blocks(20, false); @@ -772,6 +650,7 @@ fn sync_justifications_on_change_blocks() { } let net = Arc::new(Mutex::new(net)); + runtime.spawn(voters); run_to_completion(&mut runtime, 25, net.clone(), peers_a); // the first 3 peers are grandpa voters and therefore have already finalized @@ -809,6 +688,7 @@ fn finalizes_multiple_pending_changes_in_order() { // 6 peers, 3 of them are authorities and participate in grandpa from genesis let api = TestApi::new(genesis_voters); let mut net = GrandpaTestNet::new(api, 6); + runtime.spawn(initialize_grandpa(&mut net, all_peers)); // add 20 blocks net.peer(0).push_blocks(20, false); @@ -867,7 +747,8 @@ fn 
force_change_to_new_set() { let api = TestApi::new(make_ids(genesis_authorities)); let voters = make_ids(peers_a); - let net = GrandpaTestNet::new(api, 3); + let mut net = GrandpaTestNet::new(api, 3); + let voters_future = initialize_grandpa(&mut net, peers_a); let net = Arc::new(Mutex::new(net)); net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { @@ -905,6 +786,7 @@ fn force_change_to_new_set() { // it will only finalize if the forced transition happens. // we add_blocks after the voters are spawned because otherwise // the link-halves have the wrong AuthoritySet + runtime.spawn(voters_future); run_to_completion(&mut runtime, 25, net, peers_a); } @@ -946,7 +828,6 @@ fn allows_reimporting_change_blocks() { needs_justification: true, clear_justification_requests: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true, header_only: false, }), @@ -1013,10 +894,10 @@ fn test_bad_justification() { fn voter_persists_its_votes() { use std::sync::atomic::{AtomicUsize, Ordering}; use futures::future; - use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); + let mut keystore_paths = Vec::new(); // we have two authorities but we'll only be running the voter for alice // we are going to be listening for the prevotes it casts @@ -1025,152 +906,150 @@ fn voter_persists_its_votes() { // alice has a chain with 20 blocks let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2); - net.peer(0).push_blocks(20, false); - net.block_until_sync(); - - assert_eq!(net.peer(0).client().info().best_number, 20, - "Peer #{} failed to sync", 0); - - - let peer = net.peer(0); - let client = peer.client().clone(); - let net = Arc::new(Mutex::new(net)); - - // channel between the voter and the main controller. - // sending a message on the `voter_tx` restarts the voter. - let (voter_tx, voter_rx) = tracing_unbounded::<()>(""); - - let mut keystore_paths = Vec::new(); - - // startup a grandpa voter for alice but also listen for messages on a - // channel. whenever a message is received the voter is restarted. when the - // sender is dropped the voter is stopped. 
- { - let (keystore, keystore_path) = create_keystore(peers[0]); - keystore_paths.push(keystore_path); - - struct ResettableVoter { - voter: Pin + Send + Unpin>>, - voter_rx: TracingUnboundedReceiver<()>, - net: Arc>, - client: PeersClient, - keystore: SyncCryptoStorePtr, - } - - impl Future for ResettableVoter { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = Pin::into_inner(self); - - if let Poll::Ready(()) = Pin::new(&mut this.voter).poll(cx) { - panic!("error in the voter"); - } - - match Pin::new(&mut this.voter_rx).poll_next(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(None) => return Poll::Ready(()), - Poll::Ready(Some(())) => { - let (_block_import, _, _, _, link) = - this.net.lock() - .make_block_import::< - TransactionFor - >(this.client.clone()); - let link = link.lock().take().unwrap(); - - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(this.keystore.clone()), - name: Some(format!("peer#{}", 0)), - is_authority: true, - observer_enabled: true, - }, - link, - network: this.net.lock().peers[0].network_service().clone(), - telemetry_on_connect: None, - voting_rule: VotingRulesBuilder::default().build(), - prometheus_registry: None, - shared_voter_state: SharedVoterState::empty(), - }; - - let voter = run_grandpa_voter(grandpa_params) - .expect("all in order with client and network") - .map(move |r| { - // we need to keep the block_import alive since it owns the - // sender for the voter commands channel, if that gets dropped - // then the voter will stop - drop(_block_import); - r - }); - - this.voter = Box::pin(voter); - // notify current task in order to poll the voter - cx.waker().wake_by_ref(); - } - }; - - Poll::Pending - } - } - - // we create a "dummy" voter by setting it to `pending` and triggering the `tx`. - // this way, the `ResettableVoter` will reset its `voter` field to a value ASAP. - voter_tx.unbounded_send(()).unwrap(); - runtime.spawn(ResettableVoter { - voter: Box::pin(futures::future::pending()), - voter_rx, - net: net.clone(), - client: client.clone(), - keystore, - }); - } - - let (exit_tx, exit_rx) = futures::channel::oneshot::channel::<()>(); // create the communication layer for bob, but don't start any // voter. instead we'll listen for the prevote that alice casts // and cast our own manually - { + let bob_keystore = { let (keystore, keystore_path) = create_keystore(peers[1]); keystore_paths.push(keystore_path); - + keystore + }; + let bob_network = { let config = Config { gossip_duration: TEST_GOSSIP_DURATION, justification_period: 32, - keystore: Some(keystore.clone()), + keystore: Some(bob_keystore.clone()), name: Some(format!("peer#{}", 1)), is_authority: true, observer_enabled: true, }; let set_state = { - let (_, _, _, _, link) = net.lock() + let bob_client = net.peer(1).client().clone(); + let (_, _, link) = net .make_block_import::< TransactionFor - >(client); + >(bob_client); let LinkHalf { persistent_data, .. } = link.lock().take().unwrap(); let PersistentData { set_state, .. } = persistent_data; set_state }; - let network = communication::NetworkBridge::new( - net.lock().peers[1].network_service().clone(), + communication::NetworkBridge::new( + net.peers[1].network_service().clone(), config.clone(), set_state, None, - ); + ) + }; + + // spawn two voters for alice. + // half-way through the test, we stop one and start the other. 
+ let (alice_voter1, abort) = future::abortable({ + let (keystore, _) = create_keystore(peers[0]); + + let (net_service, link) = { + // temporary needed for some reason + let link = net.peers[0].data.lock().take().expect("link initialized at startup; qed"); + ( + net.peers[0].network_service().clone(), + link, + ) + }; - let (round_rx, round_tx) = network.round_communication( - Some((peers[1].public().into(), keystore).into()), + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", 0)), + is_authority: true, + observer_enabled: true, + }, + link, + network: net_service, + telemetry_on_connect: None, + voting_rule: VotingRulesBuilder::default().build(), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + }; + + run_grandpa_voter(grandpa_params).expect("all in order with client and network") + }); + + fn alice_voter2( + peers: &[Ed25519Keyring], + net: Arc>, + ) -> impl Future + Unpin + Send + 'static { + let (keystore, _) = create_keystore(peers[0]); + let mut net = net.lock(); + + // we add a new peer to the test network and we'll use + // the network service of this new peer + net.add_full_peer(); + let net_service = net.peers[2].network_service().clone(); + // but we'll reuse the client from the first peer (alice_voter1) + // since we want to share the same database, so that we can + // read the persisted state after aborting alice_voter1. + let alice_client = net.peer(0).client().clone(); + + let (_block_import, _, link) = net + .make_block_import::< + TransactionFor + >(alice_client); + let link = link.lock().take().unwrap(); + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", 0)), + is_authority: true, + observer_enabled: true, + }, + link, + network: net_service, + telemetry_on_connect: None, + voting_rule: VotingRulesBuilder::default().build(), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + }; + + run_grandpa_voter(grandpa_params) + .expect("all in order with client and network") + .map(move |r| { + // we need to keep the block_import alive since it owns the + // sender for the voter commands channel, if that gets dropped + // then the voter will stop + drop(_block_import); + r + }) + } + + runtime.spawn(alice_voter1); + + net.peer(0).push_blocks(20, false); + net.block_until_sync(); + + assert_eq!(net.peer(0).client().info().best_number, 20, + "Peer #{} failed to sync", 0); + + let net = Arc::new(Mutex::new(net)); + + let (exit_tx, exit_rx) = futures::channel::oneshot::channel::<()>(); + + { + let (round_rx, round_tx) = bob_network.round_communication( + Some((peers[1].public().into(), bob_keystore).into()), communication::Round(1), communication::SetId(0), Arc::new(VoterSet::new(voters).unwrap()), HasVoted::No, ); - runtime.spawn(network); + runtime.spawn(bob_network); let round_tx = Arc::new(Mutex::new(round_tx)); let exit_tx = Arc::new(Mutex::new(Some(exit_tx))); @@ -1178,16 +1057,18 @@ fn voter_persists_its_votes() { let net = net.clone(); let state = Arc::new(AtomicUsize::new(0)); + let runtime_handle = runtime.handle().clone(); runtime.spawn(round_rx.for_each(move |signed| { let net2 = net.clone(); let net = net.clone(); - let voter_tx = voter_tx.clone(); + let abort = abort.clone(); let round_tx = round_tx.clone(); let state = state.clone(); let 
exit_tx = exit_tx.clone(); + let runtime_handle = runtime_handle.clone(); async move { - if state.compare_and_swap(0, 1, Ordering::SeqCst) == 0 { + if state.compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst).unwrap() == 0 { // the first message we receive should be a prevote from alice. let prevote = match signed.message { finality_grandpa::Message::Prevote(prevote) => prevote, @@ -1196,7 +1077,7 @@ fn voter_persists_its_votes() { // its chain has 20 blocks and the voter targets 3/4 of the // unfinalized chain, so the vote should be for block 15 - assert!(prevote.target_number == 15); + assert_eq!(prevote.target_number, 15); // we push 20 more blocks to alice's chain net.lock().peer(0).push_blocks(20, false); @@ -1219,7 +1100,8 @@ fn voter_persists_its_votes() { net.lock().peer(0).client().as_full().unwrap().hash(30).unwrap().unwrap(); // we restart alice's voter - voter_tx.unbounded_send(()).unwrap(); + abort.abort(); + runtime_handle.spawn(alice_voter2(peers, net.clone())); // and we push our own prevote for block 30 let prevote = finality_grandpa::Prevote { @@ -1232,7 +1114,7 @@ fn voter_persists_its_votes() { // we send in a loop including a delay until items are received, this can be // ignored for the sake of reduced complexity. Pin::new(&mut *round_tx.lock()).start_send(finality_grandpa::Message::Prevote(prevote)).unwrap(); - } else if state.compare_and_swap(1, 2, Ordering::SeqCst) == 1 { + } else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap() == 1 { // the next message we receive should be our own prevote let prevote = match signed.message { finality_grandpa::Message::Prevote(prevote) => prevote, @@ -1246,7 +1128,7 @@ fn voter_persists_its_votes() { // therefore we won't ever receive it again since it will be a // known message on the gossip layer - } else if state.compare_and_swap(2, 3, Ordering::SeqCst) == 2 { + } else if state.compare_exchange(2, 3, Ordering::SeqCst, Ordering::SeqCst).unwrap() == 2 { // we then receive a precommit from alice for block 15 // even though we casted a prevote for block 30 let precommit = match signed.message { @@ -1276,6 +1158,19 @@ fn finalize_3_voters_1_light_observer() { let voters = make_ids(authorities); let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); + let voters = initialize_grandpa(&mut net, authorities); + let observer = observer::run_grandpa_observer( + Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: None, + name: Some("observer".to_string()), + is_authority: false, + observer_enabled: true, + }, + net.peers[3].data.lock().take().expect("link initialized at startup; qed"), + net.peers[3].network_service().clone(), + ).unwrap(); net.peer(0).push_blocks(20, false); net.block_until_sync(); @@ -1285,126 +1180,10 @@ fn finalize_3_voters_1_light_observer() { } let net = Arc::new(Mutex::new(net)); - let link = net.lock().peer(3).data.lock().take().expect("link initialized on startup; qed"); - - let finality_notifications = net.lock().peer(3).client().finality_notification_stream() - .take_while(|n| { - future::ready(n.header.number() < &20) - }) - .collect::>(); - - run_to_completion_with(&mut runtime, 20, net.clone(), authorities, |executor| { - executor.spawn( - observer::run_grandpa_observer( - Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: None, - name: Some("observer".to_string()), - is_authority: false, - observer_enabled: true, - }, - link, - net.lock().peers[3].network_service().clone(), - ).unwrap() - ); 
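The prevote listener above steps an atomic state machine with `compare_exchange`, the replacement for the deprecated `compare_and_swap`. A minimal standalone sketch of that pattern, assuming only `std::sync::atomic`; unlike `compare_and_swap`, `compare_exchange` reports the previous value through a `Result`, so the failure arm has to be folded back into a plain value instead of being unwrapped unconditionally:

use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let state = AtomicUsize::new(0);

    // Step 0 -> 1: the exchange succeeds and Ok(0) carries the previous value.
    let was_zero = state
        .compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst)
        .unwrap_or_else(|prev| prev) == 0;
    assert!(was_zero);

    // Trying the same step again fails: the value is already 1, so the
    // previous value comes back through Err(1) and must not be unwrapped.
    let was_zero_again = state
        .compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst)
        .unwrap_or_else(|prev| prev) == 0;
    assert!(!was_zero_again);

    // Step 1 -> 2 succeeds again, mirroring the second branch of the test.
    assert!(state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).is_ok());
}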
- Some(Box::pin(finality_notifications.map(|_| ()))) - }); -} - -#[test] -fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { - sp_tracing::try_init_simple(); - let mut runtime = Runtime::new().unwrap(); - - let peers = &[Ed25519Keyring::Alice]; - let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1); - net.add_light_peer(); - - // import block#1 WITH consensus data change. Light client ignores justification - // && instead fetches finality proof for block #1 - net.peer(0).push_authorities_change_block(vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]); - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 1, net.clone(), peers); - net.lock().block_until_sync(); - - // check that the block#1 is finalized on light client - runtime.block_on(futures::future::poll_fn(move |cx| { - if net.lock().peer(1).client().info().finalized_number == 1 { - Poll::Ready(()) - } else { - net.lock().poll(cx); - Poll::Pending - } - })); -} - -#[test] -fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different() { - // for debug: to ensure that without forced change light client will sync finality proof - const FORCE_CHANGE: bool = true; - - sp_tracing::try_init_simple(); - let mut runtime = Runtime::new().unwrap(); - - // two of these guys are offline. - let genesis_authorities = if FORCE_CHANGE { - vec![ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - Ed25519Keyring::One, - Ed25519Keyring::Two, - ] - } else { - vec![ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - ] - }; - let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let api = TestApi::new(make_ids(&genesis_authorities)); - - let voters = make_ids(peers_a); - let net = GrandpaTestNet::new(api, 3); - let net = Arc::new(Mutex::new(net)); - - // best is #1 - net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - // add a forced transition at block 5. 
- let mut block = builder.build().unwrap().block; - if FORCE_CHANGE { - add_forced_change(&mut block, 0, ScheduledChange { - next_authorities: voters.clone(), - delay: 3, - }); - } - block - }); - - // ensure block#10 enacts authorities set change => justification is generated - // normally it will reach light client, but because of the forced change, it will not - net.lock().peer(0).push_blocks(8, false); // best is #9 - net.lock().peer(0).push_authorities_change_block( - vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])] - ); // #10 - net.lock().peer(0).push_blocks(1, false); // best is #11 - net.lock().block_until_sync(); - - // finalize block #11 on full clients - run_to_completion(&mut runtime, 11, net.clone(), peers_a); - - // request finalization by light client - net.lock().add_light_peer(); - net.lock().block_until_sync(); - - // check block, finalized on light client - assert_eq!( - net.lock().peer(3).client().info().finalized_number, - if FORCE_CHANGE { 0 } else { 10 }, - ); + runtime.spawn(voters); + runtime.spawn(observer); + run_to_completion(&mut runtime, 20, net.clone(), authorities); } #[test] @@ -1415,9 +1194,7 @@ fn voter_catches_up_to_latest_round_when_behind() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); - net.peer(0).push_blocks(50, false); - net.block_until_sync(); + let net = GrandpaTestNet::new(TestApi::new(voters), 2); let net = Arc::new(Mutex::new(net)); let mut finality_notifications = Vec::new(); @@ -1470,6 +1247,9 @@ fn voter_catches_up_to_latest_round_when_behind() { runtime.spawn(voter); } + net.lock().peer(0).push_blocks(50, false); + net.lock().block_until_sync(); + // wait for them to finalize block 50. since they'll vote on 3/4 of the // unfinalized chain it will take at least 4 rounds to do it. let wait_for_finality = ::futures::future::join_all(finality_notifications); @@ -1481,18 +1261,15 @@ fn voter_catches_up_to_latest_round_when_behind() { let runtime = runtime.handle().clone(); wait_for_finality.then(move |_| { - let peer_id = 2; + net.lock().add_full_peer(); + let link = { let net = net.lock(); - let mut link = net.peers[peer_id].data.lock(); + let mut link = net.peers[2].data.lock(); link.take().expect("link initialized at startup; qed") }; - let set_state = link.persistent_data.set_state.clone(); - - let voter = voter(None, peer_id, link, net); - - runtime.spawn(voter); + runtime.spawn(voter(None, 2, link, net.clone())); let start_time = std::time::Instant::now(); let timeout = Duration::from_secs(5 * 60); @@ -1542,7 +1319,6 @@ where { let PersistentData { ref authority_set, - ref consensus_changes, ref set_state, .. 
} = link.persistent_data; @@ -1566,7 +1342,6 @@ where Environment { authority_set: authority_set.clone(), config: config.clone(), - consensus_changes: consensus_changes.clone(), client: link.client.clone(), select_chain: link.select_chain.clone(), set_id: authority_set.set_id(), @@ -1713,6 +1488,10 @@ fn grandpa_environment_never_overwrites_round_voter_state() { assert_eq!(get_current_round(2).unwrap(), HasVoted::No); + // we need to call `round_data` for the next round to pick up + // from the keystore which authority id we'll be using to vote + environment.round_data(2); + let info = peer.client().info(); let prevote = finality_grandpa::Prevote { @@ -1816,6 +1595,8 @@ fn imports_justification_for_regular_blocks_on_import() { #[test] fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { + use finality_grandpa::voter::Environment; + let alice = Ed25519Keyring::Alice; let voters = make_ids(&[alice]); @@ -1845,6 +1626,10 @@ fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { second: signed_prevote.clone(), }; + // we need to call `round_data` to pick up from the keystore which + // authority id we'll be using to vote + environment.round_data(1); + // reporting the equivocation should fail since the offender is a local // authority (i.e. we have keys in our keystore for the given id) let equivocation_proof = sp_finality_grandpa::Equivocation::Prevote(equivocation.clone()); diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index 3ac94f3b062f0..c27eab5351562 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index 700b0aeb551cd..a861e792755fe 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index 871cc3ef426ec..d552a123c3788 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-informant" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Substrate informant." 
edition = "2018" @@ -14,13 +14,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" -futures = "0.3.4" +futures = "0.3.9" log = "0.4.8" -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -sc-client-api = { version = "2.0.0", path = "../api" } -sc-network = { version = "0.8.0", path = "../network" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } +sc-client-api = { version = "3.0.0", path = "../api" } +sc-network = { version = "0.9.0", path = "../network" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "3.0.0", path = "../../primitives/utils" } +sp-transaction-pool = { version = "3.0.0", path = "../../primitives/transaction-pool" } wasm-timer = "0.2" diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 5c8f5f8ef84aa..0caef4e5fbae8 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::OutputFormat; use ansi_term::Colour; diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index c60eda76f63f6..c955834c0f111 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -35,7 +35,9 @@ mod display; /// The format to print telemetry output in. #[derive(Clone, Debug)] pub struct OutputFormat { - /// Enable color output in logs. True by default. + /// Enable color output in logs. + /// + /// Is enabled by default. 
pub enable_color: bool, } diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index c0c3acde25edf..fd9fd162e6179 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-keystore" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -17,14 +17,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.30" derive_more = "0.99.2" -futures = "0.3.4" +futures = "0.3.9" futures-util = "0.3.4" -sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } +sp-application-crypto = { version = "3.0.0", path = "../../primitives/application-crypto" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-keystore = { version = "0.9.0", path = "../../primitives/keystore" } hex = "0.4.0" merlin = { version = "2.0", default-features = false } -parking_lot = "0.10.0" +parking_lot = "0.11.1" rand = "0.7.2" serde_json = "1.0.41" subtle = "2.1.1" diff --git a/client/keystore/src/lib.rs b/client/keystore/src/lib.rs index 0b6d654bc623e..9cad56efacfd6 100644 --- a/client/keystore/src/lib.rs +++ b/client/keystore/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Keystore (and session key management) for ed25519 based chains like Polkadot. diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 856327d46f6ea..866a50ae4c93c 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -38,7 +38,7 @@ use sp_keystore::{ SyncCryptoStore, vrf::{VRFTranscriptData, VRFSignature, make_transcript}, }; -use sp_application_crypto::{ed25519, sr25519, ecdsa}; +use sp_application_crypto::{ed25519, sr25519, ecdsa, AppPair, AppKey, IsWrappedBy}; use crate::{Result, Error}; @@ -57,6 +57,14 @@ impl LocalKeystore { let inner = KeystoreInner::new_in_memory(); Self(RwLock::new(inner)) } + + /// Get a key pair for the given public key. + /// + /// This function is only available for a local keystore. 
If your application plans to work with + /// remote keystores, you do not want to depend on it. + pub fn key_pair(&self, public: &::Public) -> Result { + self.0.read().key_pair::(public) + } } #[async_trait] @@ -159,9 +167,7 @@ impl SyncCryptoStore for LocalKeystore { let all_keys = SyncCryptoStore::keys(self, id)? .into_iter() .collect::>(); - Ok(keys.into_iter() - .filter(|key| all_keys.contains(key)) - .collect::>()) + Ok(keys.into_iter().filter(|key| all_keys.contains(key)).collect::>()) } fn sign_with( @@ -321,7 +327,7 @@ impl KeystoreInner { /// Open the store at the given path. /// /// Optionally takes a password that will be used to encrypt/decrypt the keys. - pub fn open>(path: T, password: Option) -> Result { + fn open>(path: T, password: Option) -> Result { let path = path.into(); fs::create_dir_all(&path)?; @@ -337,7 +343,7 @@ impl KeystoreInner { } /// Create a new in-memory store. - pub fn new_in_memory() -> Self { + fn new_in_memory() -> Self { Self { path: None, additional: HashMap::new(), @@ -365,8 +371,8 @@ impl KeystoreInner { /// Insert a new key with anonymous crypto. /// - /// Places it into the file system store. - pub fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<()> { + /// Places it into the file system store, if a path is configured. + fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<()> { if let Some(path) = self.key_file_path(public, key_type) { let mut file = File::create(path).map_err(Error::Io)?; serde_json::to_writer(&file, &suri).map_err(Error::Json)?; @@ -377,13 +383,16 @@ impl KeystoreInner { /// Generate a new key. /// - /// Places it into the file system store. - pub fn generate_by_type(&self, key_type: KeyTypeId) -> Result { + /// Places it into the file system store, if a path is configured. Otherwise insert + /// it into the memory cache only. + fn generate_by_type(&mut self, key_type: KeyTypeId) -> Result { let (pair, phrase, _) = Pair::generate_with_phrase(self.password()); if let Some(path) = self.key_file_path(pair.public().as_slice(), key_type) { let mut file = File::create(path)?; serde_json::to_writer(&file, &phrase)?; file.flush()?; + } else { + self.insert_ephemeral_pair(&pair, &phrase, key_type); } Ok(pair) } @@ -391,7 +400,7 @@ impl KeystoreInner { /// Create a new key from seed. /// /// Does not place it into the file system store. - pub fn insert_ephemeral_from_seed_by_type( + fn insert_ephemeral_from_seed_by_type( &mut self, seed: &str, key_type: KeyTypeId, @@ -414,7 +423,7 @@ impl KeystoreInner { } /// Get a key pair for the given public key and key type. - pub fn key_pair_by_type(&self, + fn key_pair_by_type(&self, public: &Pair::Public, key_type: KeyTypeId, ) -> Result { @@ -470,6 +479,11 @@ impl KeystoreInner { Ok(public_keys) } + + /// Get a key pair for the given public key. + pub fn key_pair(&self, public: &::Public) -> Result { + self.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID).map(Into::into) + } } @@ -479,62 +493,49 @@ mod tests { use tempfile::TempDir; use sp_core::{ Pair, - crypto::{IsWrappedBy, Ss58Codec}, + crypto::Ss58Codec, testing::SR25519, }; - use sp_application_crypto::{ed25519, sr25519, AppPublic, AppKey, AppPair}; + use sp_application_crypto::{ed25519, sr25519, AppPublic}; use std::{ fs, str::FromStr, }; - /// Generate a new key. - /// - /// Places it into the file system store. 
- fn generate(store: &KeystoreInner) -> Result { - store.generate_by_type::(Pair::ID).map(Into::into) - } + const TEST_KEY_TYPE: KeyTypeId = KeyTypeId(*b"test"); - /// Create a new key from seed. - /// - /// Does not place it into the file system store. - fn insert_ephemeral_from_seed(store: &mut KeystoreInner, seed: &str) -> Result { - store.insert_ephemeral_from_seed_by_type::(seed, Pair::ID).map(Into::into) - } + impl KeystoreInner { + fn insert_ephemeral_from_seed(&mut self, seed: &str) -> Result { + self.insert_ephemeral_from_seed_by_type::(seed, Pair::ID).map(Into::into) + } - /// Get public keys of all stored keys that match the key type. - /// - /// This will just use the type of the public key (a list of which to be returned) in order - /// to determine the key type. Unless you use a specialized application-type public key, then - /// this only give you keys registered under generic cryptography, and will not return keys - /// registered under the application type. - fn public_keys(store: &KeystoreInner) -> Result> { - store.raw_public_keys(Public::ID) - .map(|v| { - v.into_iter() - .map(|k| Public::from_slice(k.as_slice())) - .collect() - }) - } + fn public_keys(&self) -> Result> { + self.raw_public_keys(Public::ID) + .map(|v| { + v.into_iter() + .map(|k| Public::from_slice(k.as_slice())) + .collect() + }) + } - /// Get a key pair for the given public key. - fn key_pair(store: &KeystoreInner, public: &::Public) -> Result { - store.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID).map(Into::into) + fn generate(&mut self) -> Result { + self.generate_by_type::(Pair::ID).map(Into::into) + } } #[test] fn basic_store() { let temp_dir = TempDir::new().unwrap(); - let store = KeystoreInner::open(temp_dir.path(), None).unwrap(); + let mut store = KeystoreInner::open(temp_dir.path(), None).unwrap(); - assert!(public_keys::(&store).unwrap().is_empty()); + assert!(store.public_keys::().unwrap().is_empty()); - let key: ed25519::AppPair = generate(&store).unwrap(); - let key2: ed25519::AppPair = key_pair(&store, &key.public()).unwrap(); + let key: ed25519::AppPair = store.generate().unwrap(); + let key2: ed25519::AppPair = store.key_pair(&key.public()).unwrap(); assert_eq!(key.public(), key2.public()); - assert_eq!(public_keys::(&store).unwrap()[0], key.public()); + assert_eq!(store.public_keys::().unwrap()[0], key.public()); } #[test] @@ -542,8 +543,7 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let mut store = KeystoreInner::open(temp_dir.path(), None).unwrap(); - let pair: ed25519::AppPair = insert_ephemeral_from_seed( - &mut store, + let pair: ed25519::AppPair = store.insert_ephemeral_from_seed( "0x3d97c819d68f9bafa7d6e79cb991eebcd77d966c5334c0b94d9e1fa7ad0869dc" ).unwrap(); assert_eq!( @@ -554,27 +554,27 @@ mod tests { drop(store); let store = KeystoreInner::open(temp_dir.path(), None).unwrap(); // Keys generated from seed should not be persisted! 
- assert!(key_pair::(&store, &pair.public()).is_err()); + assert!(store.key_pair::(&pair.public()).is_err()); } #[test] fn password_being_used() { let password = String::from("password"); let temp_dir = TempDir::new().unwrap(); - let store = KeystoreInner::open( + let mut store = KeystoreInner::open( temp_dir.path(), Some(FromStr::from_str(password.as_str()).unwrap()), ).unwrap(); - let pair: ed25519::AppPair = generate(&store).unwrap(); + let pair: ed25519::AppPair = store.generate().unwrap(); assert_eq!( pair.public(), - key_pair::(&store, &pair.public()).unwrap().public(), + store.key_pair::(&pair.public()).unwrap().public(), ); // Without the password the key should not be retrievable let store = KeystoreInner::open(temp_dir.path(), None).unwrap(); - assert!(key_pair::(&store, &pair.public()).is_err()); + assert!(store.key_pair::(&pair.public()).is_err()); let store = KeystoreInner::open( temp_dir.path(), @@ -582,7 +582,7 @@ mod tests { ).unwrap(); assert_eq!( pair.public(), - key_pair::(&store, &pair.public()).unwrap().public(), + store.key_pair::(&pair.public()).unwrap().public(), ); } @@ -593,18 +593,17 @@ mod tests { let mut keys = Vec::new(); for i in 0..10 { - keys.push(generate::(&store).unwrap().public()); - keys.push(insert_ephemeral_from_seed::( - &mut store, + keys.push(store.generate::().unwrap().public()); + keys.push(store.insert_ephemeral_from_seed::( &format!("0x3d97c819d68f9bafa7d6e79cb991eebcd7{}d966c5334c0b94d9e1fa7ad0869dc", i), ).unwrap().public()); } // Generate a key of a different type - generate::(&store).unwrap(); + store.generate::().unwrap(); keys.sort(); - let mut store_pubs = public_keys::(&store).unwrap(); + let mut store_pubs = store.public_keys::().unwrap(); store_pubs.sort(); assert_eq!(keys, store_pubs); @@ -644,4 +643,27 @@ mod tests { SyncCryptoStore::sr25519_public_keys(&store, SR25519).is_empty(), ); } + + #[test] + fn generate_with_seed_is_not_stored() { + let temp_dir = TempDir::new().unwrap(); + let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); + let _alice_tmp_key = SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, Some("//Alice")).unwrap(); + + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 1); + + drop(store); + let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 0); + } + + #[test] + fn generate_can_be_fetched_in_memory() { + let store = LocalKeystore::in_memory(); + SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, Some("//Alice")).unwrap(); + + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 1); + SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, None).unwrap(); + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 2); + } } diff --git a/client/light/Cargo.toml b/client/light/Cargo.toml index d9fecb7aa8fa2..1b45dbf5c0c54 100644 --- a/client/light/Cargo.toml +++ b/client/light/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "components for a light client" name = "sc-light" -version = "2.0.0" +version = "3.0.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -11,18 +11,18 @@ documentation = "https://docs.rs/sc-light" readme = "README.md" [dependencies] -parking_lot = "0.10.0" +parking_lot = "0.11.1" lazy_static = "1.4.0" hash-db = "0.15.2" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-externalities = { version = "0.8.0", 
path = "../../primitives/externalities" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor = { version = "0.8.0", path = "../executor" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-externalities = { version = "0.9.0", path = "../../primitives/externalities" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } +sc-client-api = { version = "3.0.0", path = "../api" } +sp-api = { version = "3.0.0", path = "../../primitives/api" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-executor = { version = "0.9.0", path = "../executor" } [features] default = [] diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index be7953e528bd8..27e0754eb552e 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -441,14 +441,14 @@ impl StateBackend for GenesisOrUnavailableState } } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, action: A, ) { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.for_keys_in_child_storage(child_info, action), + state.apply_to_child_keys_while(child_info, action), GenesisOrUnavailableState::Unavailable => (), } } diff --git a/client/light/src/blockchain.rs b/client/light/src/blockchain.rs index 3b5753f2849d5..bcabc365676a5 100644 --- a/client/light/src/blockchain.rs +++ b/client/light/src/blockchain.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -128,6 +128,13 @@ impl BlockchainBackend for Blockchain where Block: BlockT, S fn children(&self, _parent_hash: Block::Hash) -> ClientResult> { Err(ClientError::NotAvailableOnLightClient) } + + fn extrinsic( + &self, + _hash: &Block::Hash, + ) -> ClientResult::Extrinsic>> { + Err(ClientError::NotAvailableOnLightClient) + } } impl, Block: BlockT> ProvideCache for Blockchain { diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index fa0f02cd5aed9..8b403823b0eec 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,7 +25,6 @@ use std::{ use codec::{Encode, Decode}; use sp_core::{ convert_hash, NativeOrEncoded, traits::{CodeExecutor, SpawnNamed}, - offchain::storage::OffchainOverlayedChanges, }; use sp_runtime::{ generic::BlockId, traits::{One, Block as BlockT, Header as HeaderT, HashFor}, @@ -113,7 +112,6 @@ impl CallExecutor for method: &str, call_data: &[u8], changes: &RefCell, - offchain_changes: &RefCell, _: Option<&RefCell>>, initialize_block: InitializeBlock<'a, Block>, _manager: ExecutionManager, @@ -140,7 +138,6 @@ impl CallExecutor for method, call_data, changes, - offchain_changes, None, initialize_block, ExecutionManager::NativeWhenPossible, @@ -276,7 +273,8 @@ pub fn check_execution_proof_with_make_header( // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); - let runtime_code = backend_runtime_code.runtime_code()?; + let runtime_code = backend_runtime_code.runtime_code() + .map_err(|_e| ClientError::RuntimeCodeMissing)?; execution_proof_check_on_trie_backend::( &trie_backend, diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index 33113c2fc7df0..b71c4871803da 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -239,7 +239,7 @@ impl FetchChecker for LightDataChecker convert_hash(request.header.state_root()), remote_proof, request.keys.iter(), - ).map_err(Into::into) + ).map_err(|e| ClientError::from(e)) } fn check_read_child_proof( @@ -249,14 +249,14 @@ impl FetchChecker for LightDataChecker ) -> ClientResult, Option>>> { let child_info = match ChildType::from_prefixed_key(&request.storage_key) { Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child type".into()), + None => return Err(ClientError::InvalidChildType), }; read_child_proof_check::( convert_hash(request.header.state_root()), remote_proof, &child_info, request.keys.iter(), - ).map_err(Into::into) + ).map_err(|e| ClientError::from(e)) } fn check_execution_proof( @@ -292,10 +292,10 @@ impl FetchChecker for LightDataChecker if *request.header.extrinsics_root() == extrinsics_root { Ok(body) } else { - Err(format!("RemoteBodyRequest: invalid extrinsics root expected: {} but got {}", - *request.header.extrinsics_root(), - extrinsics_root, - ).into()) + Err(ClientError::ExtrinsicRootInvalid { + received: request.header.extrinsics_root().to_string(), + expected: extrinsics_root.to_string(), + }) } } diff --git a/client/light/src/lib.rs b/client/light/src/lib.rs index 899d1ae31a3dd..e647b8743cc0f 100644 --- a/client/light/src/lib.rs +++ b/client/light/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 94d9272f4bbd2..4da356f92d685 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Gossiping for the Substrate network protocol" name = "sc-network-gossip" -version = "0.8.0" +version = "0.9.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -15,17 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.4" +futures = "0.3.9" futures-timer = "3.0.1" -libp2p = { version = "0.29.1", default-features = false } +libp2p = { version = "0.34.0", default-features = false } log = "0.4.8" -lru = "0.4.3" -sc-network = { version = "0.8.0", path = "../network" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +lru = "0.6.1" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } +sc-network = { version = "0.9.0", path = "../network" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } wasm-timer = "0.2" [dev-dependencies] async-std = "1.6.5" -quickcheck = "0.9.0" +quickcheck = "1.0.3" rand = "0.7.2" substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 70c2942597aa5..235ac98dc3968 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::{Network, Validator}; use crate::state_machine::{ConsensusGossip, TopicNotification, PERIODIC_MAINTENANCE_INTERVAL}; @@ -23,7 +25,8 @@ use futures::prelude::*; use futures::channel::mpsc::{channel, Sender, Receiver}; use libp2p::PeerId; use log::trace; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use prometheus_endpoint::Registry; +use sp_runtime::traits::Block as BlockT; use std::{ borrow::Cow, collections::{HashMap, VecDeque}, @@ -38,7 +41,7 @@ pub struct GossipEngine { state_machine: ConsensusGossip, network: Box + Send>, periodic_maintenance_interval: futures_timer::Delay, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, /// Incoming events from the network. 
network_event_stream: Pin + Send>>, @@ -68,20 +71,18 @@ impl GossipEngine { /// Create a new instance. pub fn new + Send + Clone + 'static>( network: N, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, + protocol: impl Into>, validator: Arc>, + metrics_registry: Option<&Registry>, ) -> Self where B: 'static { - // We grab the event stream before registering the notifications protocol, otherwise we - // might miss events. + let protocol = protocol.into(); let network_event_stream = network.event_stream(); - network.register_notifications_protocol(engine_id, protocol_name.into()); GossipEngine { - state_machine: ConsensusGossip::new(validator, engine_id), + state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry), network: Box::new(network), periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL), - engine_id, + protocol, network_event_stream, message_sinks: HashMap::new(), @@ -165,7 +166,7 @@ impl GossipEngine { /// /// Note: this method isn't strictly related to gossiping and should eventually be moved /// somewhere else. - pub fn announce(&self, block: B::Hash, associated_data: Vec) { + pub fn announce(&self, block: B::Hash, associated_data: Option>) { self.network.announce(block, associated_data); } } @@ -181,21 +182,27 @@ impl Future for GossipEngine { ForwardingState::Idle => { match this.network_event_stream.poll_next_unpin(cx) { Poll::Ready(Some(event)) => match event { - Event::NotificationStreamOpened { remote, engine_id, role } => { - if engine_id != this.engine_id { + Event::SyncConnected { remote } => { + this.network.add_set_reserved(remote, this.protocol.clone()); + } + Event::SyncDisconnected { remote } => { + this.network.remove_set_reserved(remote, this.protocol.clone()); + } + Event::NotificationStreamOpened { remote, protocol, role } => { + if protocol != this.protocol { continue; } this.state_machine.new_peer(&mut *this.network, remote, role); } - Event::NotificationStreamClosed { remote, engine_id } => { - if engine_id != this.engine_id { + Event::NotificationStreamClosed { remote, protocol } => { + if protocol != this.protocol { continue; } this.state_machine.peer_disconnected(&mut *this.network, remote); }, Event::NotificationsReceived { remote, messages } => { let messages = messages.into_iter().filter_map(|(engine, data)| { - if engine == this.engine_id { + if engine == this.protocol { Some(data.to_vec()) } else { None @@ -296,9 +303,9 @@ mod tests { use crate::{ValidationResult, ValidatorContext}; use futures::{channel::mpsc::{unbounded, UnboundedSender}, executor::{block_on, block_on_stream}, future::poll_fn}; use quickcheck::{Arbitrary, Gen, QuickCheck}; - use rand::Rng; use sc_network::ObservedRole; use sp_runtime::{testing::H256, traits::{Block as BlockT}}; + use std::borrow::Cow; use std::convert::TryInto; use std::sync::{Arc, Mutex}; use substrate_test_runtime_client::runtime::Block; @@ -325,17 +332,21 @@ mod tests { fn report_peer(&self, _: PeerId, _: ReputationChange) { } - fn disconnect_peer(&self, _: PeerId) { + fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { unimplemented!(); } - fn write_notification(&self, _: PeerId, _: ConsensusEngineId, _: Vec) { - unimplemented!(); + fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) { } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} + fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) { + } - fn announce(&self, _: B::Hash, _: Vec) { + fn write_notification(&self, _: 
PeerId, _: Cow<'static, str>, _: Vec) { + unimplemented!(); + } + + fn announce(&self, _: B::Hash, _: Option>) { unimplemented!(); } } @@ -361,9 +372,9 @@ mod tests { let network = TestNetwork::default(); let mut gossip_engine = GossipEngine::::new( network.clone(), - [1, 2, 3, 4], - "my_protocol", - Arc::new(AllowAll{}), + "/my_protocol", + Arc::new(AllowAll {}), + None, ); // Drop network event stream sender side. @@ -383,15 +394,15 @@ mod tests { #[test] fn keeps_multiple_subscribers_per_topic_updated_with_both_old_and_new_messages() { let topic = H256::default(); - let engine_id = [1, 2, 3, 4]; + let protocol = Cow::Borrowed("/my_protocol"); let remote_peer = PeerId::random(); let network = TestNetwork::default(); let mut gossip_engine = GossipEngine::::new( network.clone(), - engine_id.clone(), - "my_protocol", - Arc::new(AllowAll{}), + protocol.clone(), + Arc::new(AllowAll {}), + None, ); let mut event_sender = network.inner.lock() @@ -404,7 +415,7 @@ mod tests { event_sender.start_send( Event::NotificationStreamOpened { remote: remote_peer.clone(), - engine_id: engine_id.clone(), + protocol: protocol.clone(), role: ObservedRole::Authority, } ).expect("Event stream is unbounded; qed."); @@ -413,7 +424,7 @@ mod tests { let events = messages.iter().cloned().map(|m| { Event::NotificationsReceived { remote: remote_peer.clone(), - messages: vec![(engine_id, m.into())] + messages: vec![(protocol.clone(), m.into())] } }).collect::>(); @@ -457,12 +468,14 @@ mod tests { } impl Arbitrary for ChannelLengthAndTopic { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { + let possible_length = (0..100).collect::>(); + let possible_topics = (0..10).collect::>(); Self { - length: g.gen_range(0, 100), + length: *g.choose(&possible_length).unwrap(), // Make sure channel topics and message topics overlap by choosing a small // range. - topic: H256::from_low_u64_ne(g.gen_range(0, 10)), + topic: H256::from_low_u64_ne(*g.choose(&possible_topics).unwrap()), } } } @@ -473,11 +486,12 @@ mod tests { } impl Arbitrary for Message{ - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { + let possible_topics = (0..10).collect::>(); Self { // Make sure channel topics and message topics overlap by choosing a small // range. - topic: H256::from_low_u64_ne(g.gen_range(0, 10)), + topic: H256::from_low_u64_ne(*g.choose(&possible_topics).unwrap()), } } } @@ -498,7 +512,7 @@ mod tests { } fn prop(channels: Vec, notifications: Vec>) { - let engine_id = [1, 2, 3, 4]; + let protocol = Cow::Borrowed("/my_protocol"); let remote_peer = PeerId::random(); let network = TestNetwork::default(); @@ -524,9 +538,9 @@ mod tests { let mut gossip_engine = GossipEngine::::new( network.clone(), - engine_id.clone(), - "my_protocol", - Arc::new(TestValidator{}), + protocol.clone(), + Arc::new(TestValidator {}), + None, ); // Create channels. @@ -542,8 +556,10 @@ mod tests { // Insert sender sides into `gossip_engine`. 
for (topic, tx) in txs { match gossip_engine.message_sinks.get_mut(&topic) { - Some(entry) => entry.push(tx), - None => {gossip_engine.message_sinks.insert(topic, vec![tx]);}, + Some(entry) => entry.push(tx), + None => { + gossip_engine.message_sinks.insert(topic, vec![tx]); + } } } @@ -558,7 +574,7 @@ mod tests { event_sender.start_send( Event::NotificationStreamOpened { remote: remote_peer.clone(), - engine_id: engine_id.clone(), + protocol: protocol.clone(), role: ObservedRole::Authority, } ).expect("Event stream is unbounded; qed."); @@ -576,7 +592,7 @@ mod tests { message.push(i_notification.try_into().unwrap()); message.push(i_message.try_into().unwrap()); - (engine_id, message.into()) + (protocol.clone(), message.into()) }).collect(); event_sender.start_send(Event::NotificationsReceived { diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 1d566ed3cbba2..f8b6e8f0c2fdc 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Polite gossiping. //! @@ -33,11 +35,16 @@ //! - Implement the `Network` trait, representing the low-level networking primitives. It is //! already implemented on `sc_network::NetworkService`. //! - Implement the `Validator` trait. See the section below. -//! - Decide on a `ConsensusEngineId`. Each gossiping protocol should have a different one. +//! - Decide on a protocol name. Each gossiping protocol should have a different one. //! - Build a `GossipEngine` using these three elements. //! - Use the methods of the `GossipEngine` in order to send out messages and receive incoming //! messages. //! +//! The `GossipEngine` will automatically use `Network::add_set_reserved` and +//! `Network::remove_set_reserved` to maintain a set of peers equal to the set of peers the +//! node is syncing from. See the documentation of `sc-network` for more explanations about the +//! concepts of peer sets. +//! //! # What is a validator? //! //! 
The primary role of a `Validator` is to process incoming messages from peers, and decide @@ -59,9 +66,9 @@ pub use self::state_machine::TopicNotification; pub use self::validator::{DiscardAll, MessageIntent, Validator, ValidatorContext, ValidationResult}; use futures::prelude::*; -use sc_network::{Event, ExHashT, NetworkService, PeerId, ReputationChange}; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; -use std::{borrow::Cow, pin::Pin, sync::Arc}; +use sc_network::{multiaddr, Event, ExHashT, NetworkService, PeerId, ReputationChange}; +use sp_runtime::{traits::Block as BlockT}; +use std::{borrow::Cow, iter, pin::Pin, sync::Arc}; mod bridge; mod state_machine; @@ -75,26 +82,23 @@ pub trait Network { /// Adjust the reputation of a node. fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange); + /// Adds the peer to the set of peers to be connected to with this protocol. + fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); + + /// Removes the peer from the set of peers to be connected to with this protocol. + fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); + /// Force-disconnect a peer. - fn disconnect_peer(&self, who: PeerId); + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>); /// Send a notification to a peer. - fn write_notification(&self, who: PeerId, engine_id: ConsensusEngineId, message: Vec); - - /// Registers a notifications protocol. - /// - /// See the documentation of [`NetworkService:register_notifications_protocol`] for more information. - fn register_notifications_protocol( - &self, - engine_id: ConsensusEngineId, - protocol_name: Cow<'static, str>, - ); + fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec); /// Notify everyone we're connected to that we have the given block. /// /// Note: this method isn't strictly related to gossiping and should eventually be moved /// somewhere else. 
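The module documentation above lists the steps for using the gossip crate after this change: implement `Network`, implement `Validator`, pick a protocol name, and build a `GossipEngine`. A minimal construction sketch against the new constructor shown earlier in this patch (the `network`, `MyValidator` and `registry` bindings are placeholders, not part of the patch):

```rust
use std::sync::Arc;
use sc_network_gossip::GossipEngine;

// `network` is anything implementing `sc_network_gossip::Network<Block> + Clone`,
// `MyValidator` is a user-supplied gossip `Validator<Block>`, and `registry` is
// an optional Prometheus registry used by the new gossip metrics.
let mut gossip_engine = GossipEngine::<Block>::new(
	network.clone(),
	"/my-chain/my-gossip/1",          // protocol name; replaces the old `ConsensusEngineId`
	Arc::new(MyValidator::default()),
	Some(&registry),
);

// `GossipEngine` implements `Future`, so it has to be polled (e.g. spawned as a
// task) to maintain the reserved peer set and deliver notifications to subscribers.
```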
- fn announce(&self, block: B::Hash, associated_data: Vec); + fn announce(&self, block: B::Hash, associated_data: Option>); } impl Network for Arc> { @@ -106,23 +110,33 @@ impl Network for Arc> { NetworkService::report_peer(self, peer_id, reputation); } - fn disconnect_peer(&self, who: PeerId) { - NetworkService::disconnect_peer(self, who) + fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { + let addr = iter::once(multiaddr::Protocol::P2p(who.into())) + .collect::(); + let result = NetworkService::add_peers_to_reserved_set(self, protocol, iter::once(addr).collect()); + if let Err(err) = result { + log::error!(target: "gossip", "add_set_reserved failed: {}", err); + } + } + + fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { + let addr = iter::once(multiaddr::Protocol::P2p(who.into())) + .collect::(); + let result = NetworkService::remove_peers_from_reserved_set(self, protocol, iter::once(addr).collect()); + if let Err(err) = result { + log::error!(target: "gossip", "remove_set_reserved failed: {}", err); + } } - fn write_notification(&self, who: PeerId, engine_id: ConsensusEngineId, message: Vec) { - NetworkService::write_notification(self, who, engine_id, message) + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { + NetworkService::disconnect_peer(self, who, protocol) } - fn register_notifications_protocol( - &self, - engine_id: ConsensusEngineId, - protocol_name: Cow<'static, str>, - ) { - NetworkService::register_notifications_protocol(self, engine_id, protocol_name) + fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec) { + NetworkService::write_notification(self, who, protocol, message) } - fn announce(&self, block: B::Hash, associated_data: Vec) { + fn announce(&self, block: B::Hash, associated_data: Option>) { NetworkService::announce_block(self, block, associated_data) } } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 60c669ecb6680..a58432d8c2476 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,20 +18,29 @@ use crate::{Network, MessageIntent, Validator, ValidatorContext, ValidationResult}; +use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use std::iter; use std::time; -use log::{error, trace}; +use log::{debug, error, trace}; use lru::LruCache; use libp2p::PeerId; +use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; -use sp_runtime::ConsensusEngineId; use sc_network::ObservedRole; use wasm_timer::Instant; // FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115 -const KNOWN_MESSAGES_CACHE_SIZE: usize = 4096; +// NOTE: The current value is adjusted based on largest production network deployment (Kusama) and +// the current main gossip user (GRANDPA). Currently there are ~800 validators on Kusama, as such, +// each GRANDPA round should generate ~1600 messages, and we currently keep track of the last 2 +// completed rounds and the current live one. 
That makes it so that at any point we will be holding +// ~4800 live messages. +// +// Assuming that each known message is tracked with a 32 byte hash (common for `Block::Hash`), then +// this cache should take about 256 KB of memory. +const KNOWN_MESSAGES_CACHE_SIZE: usize = 8192; const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_secs(30); @@ -89,7 +98,7 @@ impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { /// Send addressed message to a peer. fn send_message(&mut self, who: &PeerId, message: Vec) { - self.network.write_notification(who.clone(), self.gossip.engine_id, message); + self.network.write_notification(who.clone(), self.gossip.protocol.clone(), message); } /// Send all messages with given topic to a peer. @@ -100,7 +109,7 @@ impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { fn propagate<'a, B: BlockT, I>( network: &mut dyn Network, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, messages: I, intent: MessageIntent, peers: &mut HashMap>, @@ -138,7 +147,7 @@ fn propagate<'a, B: BlockT, I>( peer.known_messages.insert(message_hash.clone()); trace!(target: "gossip", "Propagating to {}: {:?}", id, message); - network.write_notification(id.clone(), engine_id, message.clone()); + network.write_notification(id.clone(), protocol.clone(), message.clone()); } } } @@ -148,21 +157,36 @@ pub struct ConsensusGossip { peers: HashMap>, messages: Vec>, known_messages: LruCache, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, validator: Arc>, next_broadcast: Instant, + metrics: Option, } impl ConsensusGossip { /// Create a new instance using the given validator. - pub fn new(validator: Arc>, engine_id: ConsensusEngineId) -> Self { + pub fn new( + validator: Arc>, + protocol: Cow<'static, str>, + metrics_registry: Option<&Registry>, + ) -> Self { + let metrics = match metrics_registry.map(Metrics::register) { + Some(Ok(metrics)) => Some(metrics), + Some(Err(e)) => { + debug!(target: "gossip", "Failed to register metrics: {:?}", e); + None + } + None => None, + }; + ConsensusGossip { peers: HashMap::new(), messages: Default::default(), known_messages: LruCache::new(KNOWN_MESSAGES_CACHE_SIZE), - engine_id, + protocol, validator, next_broadcast: Instant::now() + REBROADCAST_INTERVAL, + metrics, } } @@ -197,6 +221,10 @@ impl ConsensusGossip { message, sender, }); + + if let Some(ref metrics) = self.metrics { + metrics.registered_messages.inc(); + } } } @@ -235,7 +263,14 @@ impl ConsensusGossip { fn rebroadcast(&mut self, network: &mut dyn Network) { let messages = self.messages.iter() .map(|entry| (&entry.message_hash, &entry.topic, &entry.message)); - propagate(network, self.engine_id, messages, MessageIntent::PeriodicRebroadcast, &mut self.peers, &self.validator); + propagate( + network, + self.protocol.clone(), + messages, + MessageIntent::PeriodicRebroadcast, + &mut self.peers, + &self.validator + ); } /// Broadcast all messages with given topic. @@ -247,7 +282,7 @@ impl ConsensusGossip { } else { None } ); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; - propagate(network, self.engine_id, messages, intent, &mut self.peers, &self.validator); + propagate(network, self.protocol.clone(), messages, intent, &mut self.peers, &self.validator); } /// Prune old or no longer relevant consensus messages. 
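The sizing rationale above is easy to sanity-check. A small standalone check of the arithmetic (the 32-byte hash width is the assumption stated in the comment, typical for `Block::Hash`):

```rust
const KNOWN_MESSAGES_CACHE_SIZE: usize = 8192;
const HASH_SIZE: usize = 32; // hash width assumed in the comment above

fn main() {
    // ~800 validators => ~1600 GRANDPA messages per round; two completed rounds
    // plus the live one are tracked:
    let live_messages = 1600 * 3;
    assert!(live_messages <= KNOWN_MESSAGES_CACHE_SIZE);

    // Upper bound on memory held by the known-message keys:
    let bytes = KNOWN_MESSAGES_CACHE_SIZE * HASH_SIZE;
    assert_eq!(bytes, 256 * 1024); // 262_144 bytes, i.e. the ~256 KB estimate above
}
```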
Provide a predicate @@ -257,10 +292,17 @@ impl ConsensusGossip { let before = self.messages.len(); let mut message_expired = self.validator.message_expired(); - self.messages.retain(|entry| !message_expired(entry.topic, &entry.message)); + self.messages + .retain(|entry| !message_expired(entry.topic, &entry.message)); + + let expired_messages = before - self.messages.len(); + + if let Some(ref metrics) = self.metrics { + metrics.expired_messages.inc_by(expired_messages as u64) + } trace!(target: "gossip", "Cleaned up {} stale messages, {} left ({} known)", - before - self.messages.len(), + expired_messages, self.messages.len(), known_messages.len(), ); @@ -374,7 +416,7 @@ impl ConsensusGossip { peer.known_messages.insert(entry.message_hash.clone()); trace!(target: "gossip", "Sending topic message to {}: {:?}", who, entry.message); - network.write_notification(who.clone(), self.engine_id, entry.message.clone()); + network.write_notification(who.clone(), self.protocol.clone(), entry.message.clone()); } } } @@ -390,7 +432,14 @@ impl ConsensusGossip { let message_hash = HashFor::::hash(&message); self.register_message_hashed(message_hash, topic, message.clone(), None); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; - propagate(network, self.engine_id, iter::once((&message_hash, &topic, &message)), intent, &mut self.peers, &self.validator); + propagate( + network, + self.protocol.clone(), + iter::once((&message_hash, &topic, &message)), + intent, + &mut self.peers, + &self.validator + ); } /// Send addressed message to a peer. The message is not kept or multicast @@ -411,7 +460,33 @@ impl ConsensusGossip { trace!(target: "gossip", "Sending direct to {}: {:?}", who, message); peer.known_messages.insert(message_hash); - network.write_notification(who.clone(), self.engine_id, message); + network.write_notification(who.clone(), self.protocol.clone(), message); + } +} + +struct Metrics { + registered_messages: Counter, + expired_messages: Counter, +} + +impl Metrics { + fn register(registry: &Registry) -> Result { + Ok(Self { + registered_messages: register( + Counter::new( + "network_gossip_registered_messages_total", + "Number of registered messages by the gossip service.", + )?, + registry, + )?, + expired_messages: register( + Counter::new( + "network_gossip_expired_messages_total", + "Number of expired messages by the gossip service.", + )?, + registry, + )?, + }) } } @@ -481,17 +556,21 @@ mod tests { self.inner.lock().unwrap().peer_reports.push((peer_id, reputation_change)); } - fn disconnect_peer(&self, _: PeerId) { + fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { unimplemented!(); } - fn write_notification(&self, _: PeerId, _: ConsensusEngineId, _: Vec) { - unimplemented!(); + fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) { + } + + fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) { } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} + fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { + unimplemented!(); + } - fn announce(&self, _: B::Hash, _: Vec) { + fn announce(&self, _: B::Hash, _: Option>) { unimplemented!(); } } @@ -520,7 +599,7 @@ mod tests { let prev_hash = H256::random(); let best_hash = H256::random(); - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); let m1_hash = H256::random(); let m2_hash = H256::random(); let m1 = 
vec![1, 2, 3]; @@ -547,11 +626,11 @@ mod tests { #[test] fn message_stream_include_those_sent_before_asking() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); // Register message. let message = vec![4, 5, 6]; - let topic = HashFor::::hash(&[1,2,3]); + let topic = HashFor::::hash(&[1, 2, 3]); consensus.register_message(topic, message.clone()); assert_eq!( @@ -562,7 +641,7 @@ mod tests { #[test] fn can_keep_multiple_messages_per_topic() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); let topic = [1; 32].into(); let msg_a = vec![1, 2, 3]; @@ -576,7 +655,7 @@ mod tests { #[test] fn peer_is_removed_on_disconnect() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); let mut network = NoOpNetwork::default(); @@ -590,14 +669,12 @@ mod tests { #[test] fn on_incoming_ignores_discarded_messages() { - let to_forward = ConsensusGossip::::new( - Arc::new(DiscardAll), - [0, 0, 0, 0], - ).on_incoming( - &mut NoOpNetwork::default(), - PeerId::random(), - vec![vec![1, 2, 3]], - ); + let to_forward = ConsensusGossip::::new(Arc::new(DiscardAll), "/foo".into(), None) + .on_incoming( + &mut NoOpNetwork::default(), + PeerId::random(), + vec![vec![1, 2, 3]], + ); assert!( to_forward.is_empty(), @@ -610,15 +687,13 @@ mod tests { let mut network = NoOpNetwork::default(); let remote = PeerId::random(); - let to_forward = ConsensusGossip::::new( - Arc::new(AllowAll), - [0, 0, 0, 0], - ).on_incoming( - &mut network, - // Unregistered peer. - remote.clone(), - vec![vec![1, 2, 3]], - ); + let to_forward = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None) + .on_incoming( + &mut network, + // Unregistered peer. + remote.clone(), + vec![vec![1, 2, 3]], + ); assert!( to_forward.is_empty(), diff --git a/client/network-gossip/src/validator.rs b/client/network-gossip/src/validator.rs index fd29aaddafe6d..4b5440c1a06f3 100644 --- a/client/network-gossip/src/validator.rs +++ b/client/network-gossip/src/validator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index f5ebee39db563..87e960fb64263 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate network protocol" name = "sc-network" -version = "0.8.0" +version = "0.9.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -14,68 +14,71 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.6.1" +prost-build = "0.7" [dependencies] async-trait = "0.1" async-std = "1.6.5" bitflags = "1.2.0" -bs58 = "0.3.1" -bytes = "0.5.0" -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } +bs58 = "0.4.0" +cid = "0.6.0" +bytes = "1" +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } derive_more = "0.99.2" either = "1.5.3" erased-serde = "0.3.9" fnv = "1.0.6" -fork-tree = { version = "2.0.0", path = "../../utils/fork-tree" } -futures = "0.3.4" +fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" } +futures = "0.3.9" futures-timer = "3.0.2" -futures_codec = "0.4.0" +asynchronous-codec = "0.5" hex = "0.4.0" ip_network = "0.3.4" linked-hash-map = "0.5.2" linked_hash_set = "0.1.3" +lru = "0.6.3" log = "0.4.8" -lru = "0.4.0" nohash-hasher = "0.2.0" -parking_lot = "0.10.0" -pin-project = "0.4.6" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0", path = "../../utils/prometheus" } -prost = "0.6.1" +parking_lot = "0.11.1" +pin-project = "1.0.4" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } +prost = "0.7" rand = "0.7.2" -sc-block-builder = { version = "0.8.0", path = "../block-builder" } -sc-client-api = { version = "2.0.0", path = "../api" } -sc-peerset = { version = "2.0.0", path = "../peerset" } +sc-block-builder = { version = "0.9.0", path = "../block-builder" } +sc-client-api = { version = "3.0.0", path = "../api" } +sc-peerset = { version = "3.0.0", path = "../peerset" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -slog = { version = "2.5.2", features = ["nested-values"] } -slog_derive = "0.2.0" -smallvec = "0.6.10" -sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } +smallvec = "1.5.0" +sp-arithmetic = { version = "3.0.0", path = "../../primitives/arithmetic" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "3.0.0", path = "../../primitives/utils" } thiserror = "1" -unsigned-varint = { version = "0.4.0", features = ["futures", "futures-codec"] } +unsigned-varint = { version = "0.6.0", features = ["futures", "asynchronous_codec"] } void = "1.0.2" wasm-timer = "0.2" 
-zeroize = "1.0.0" +zeroize = "1.2.0" [dependencies.libp2p] -version = "0.29.1" +version = "0.34.0" + +[target.'cfg(target_os = "unknown")'.dependencies.libp2p] +version = "0.34.0" default-features = false -features = ["identify", "kad", "mdns-async-std", "mplex", "noise", "ping", "request-response", "tcp-async-std", "websocket", "yamux"] +features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "request-response", "tcp-async-io", "websocket", "yamux"] + [dev-dependencies] assert_matches = "1.3" -libp2p = { version = "0.29.1", default-features = false } -quickcheck = "0.9.0" +libp2p = { version = "0.34.0", default-features = false } +quickcheck = "1.0.3" rand = "0.7.2" -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } +sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } tempfile = "3.1.0" diff --git a/client/network/README.md b/client/network/README.md index e0bd691043bee..914720f53e2a9 100644 --- a/client/network/README.md +++ b/client/network/README.md @@ -120,8 +120,8 @@ bytes. block announces are pushed to other nodes. The handshake is empty on both sides. The message format is a SCALE-encoded tuple containing a block header followed with an opaque list of bytes containing some data associated with this block announcement, e.g. a candidate message. -- Notifications protocols that are registered using the `register_notifications_protocol` -method. For example: `/paritytech/grandpa/1`. See below for more information. +- Notifications protocols that are registered using `NetworkConfiguration::notifications_protocols`. +For example: `/paritytech/grandpa/1`. See below for more information. ## The legacy Substrate substream @@ -223,4 +223,4 @@ dispatching a background task with the [`NetworkWorker`]. More precise usage details are still being worked on and will likely change in the future. -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file +License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/client/network/build.rs b/client/network/build.rs index 8ed460f163eb4..0eea622e87574 100644 --- a/client/network/build.rs +++ b/client/network/build.rs @@ -1,7 +1,7 @@ const PROTOS: &[&str] = &[ "src/schema/api.v1.proto", - "src/schema/finality.v1.proto", - "src/schema/light.v1.proto" + "src/schema/light.v1.proto", + "src/schema/bitswap.v1.2.0.proto", ]; fn main() { diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index c8684eba625c5..06c91de886877 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -1,36 +1,43 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
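As the README hunk above notes, notification protocols are now declared on the network configuration instead of being registered at runtime. A sketch of that declaration, assuming `notifications_protocols` is a list of protocol names on `sc_network::config::NetworkConfiguration` (the field's exact type is not shown in this diff):

```rust
// `net_config` is an existing `sc_network::config::NetworkConfiguration`,
// e.g. the one assembled by the service builder.
net_config.notifications_protocols.push("/paritytech/grandpa/1".into());
```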
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::{ - config::{ProtocolId, Role}, block_requests, light_client_handler, finality_requests, - peer_info, request_responses, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, - protocol::{message::{self, Roles}, CustomMessageOutcome, NotificationsSink, Protocol}, + config::{ProtocolId, Role}, + bitswap::Bitswap, + discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, + protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, + peer_info, request_responses, light_client_requests, ObservedRole, DhtEvent, ExHashT, }; use bytes::Bytes; -use codec::Encode as _; +use futures::{channel::oneshot, stream::StreamExt}; use libp2p::NetworkBehaviour; use libp2p::core::{Multiaddr, PeerId, PublicKey}; use libp2p::identify::IdentifyInfo; use libp2p::kad::record; -use libp2p::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}; +use libp2p::swarm::{ + NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters, toggle::Toggle +}; use log::debug; +use prost::Message; use sp_consensus::{BlockOrigin, import_queue::{IncomingBlock, Origin}}; -use sp_runtime::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId, Justification}; +use sp_runtime::{traits::{Block as BlockT, NumberFor}, Justification}; use std::{ borrow::Cow, collections::{HashSet, VecDeque}, @@ -40,7 +47,8 @@ use std::{ }; pub use crate::request_responses::{ - ResponseFailure, InboundFailure, RequestFailure, OutboundFailure, RequestId, SendRequestError + ResponseFailure, InboundFailure, RequestFailure, OutboundFailure, RequestId, + IfDisconnected }; /// General behaviour of the network. Combines all protocols together. @@ -54,14 +62,10 @@ pub struct Behaviour { peer_info: peer_info::PeerInfoBehaviour, /// Discovers nodes of the network. discovery: DiscoveryBehaviour, + /// Bitswap server for blockchain data. + bitswap: Toggle>, /// Generic request-reponse protocols. request_responses: request_responses::RequestResponsesBehaviour, - /// Block request handling. - block_requests: block_requests::BlockRequests, - /// Finality proof request handling. - finality_proof_requests: finality_requests::FinalityProofRequests, - /// Light client request handling. - light_client_handler: light_client_handler::LightClientHandler, /// Queue of events to produce for the outside. #[behaviour(ignore)] @@ -70,13 +74,21 @@ pub struct Behaviour { /// Role of our local node, as originally passed from the configuration. #[behaviour(ignore)] role: Role, + + /// Light client request handling. 
+ #[behaviour(ignore)] + light_client_request_sender: light_client_requests::sender::LightClientRequestSender, + + /// Protocol name used to send out block requests via + /// [`request_responses::RequestResponsesBehaviour`]. + #[behaviour(ignore)] + block_request_protocol_name: String, } /// Event generated by `Behaviour`. pub enum BehaviourOut { BlockImport(BlockOrigin, Vec>), JustificationImport(Origin, B::Hash, NumberFor, Justification), - FinalityProofImport(Origin, B::Hash, NumberFor, Vec), /// Started a random iterative Kademlia discovery query. RandomKademliaStarted(ProtocolId), @@ -94,34 +106,18 @@ pub enum BehaviourOut { result: Result, }, - /// A request initiated using [`Behaviour::send_request`] has succeeded or failed. - RequestFinished { - /// Request that has succeeded. - request_id: RequestId, - /// Response sent by the remote or reason for failure. - result: Result, RequestFailure>, - }, - - /// Started a new request with the given node. - /// - /// This event is for statistics purposes only. The request and response handling are entirely - /// internal to the behaviour. - OpaqueRequestStarted { - peer: PeerId, - /// Protocol name of the request. - protocol: String, - }, - /// Finished, successfully or not, a previously-started request. + /// A request has succeeded or failed. /// - /// This event is for statistics purposes only. The request and response handling are entirely - /// internal to the behaviour. - OpaqueRequestFinished { - /// Who we were requesting. + /// This event is generated for statistics purposes. + RequestFinished { + /// Peer that we send a request to. peer: PeerId, - /// Protocol name of the request. - protocol: String, - /// How long before the response came or the request got cancelled. - request_duration: Duration, + /// Name of the protocol in question. + protocol: Cow<'static, str>, + /// Duration the request took. + duration: Duration, + /// Result of the request. + result: Result<(), RequestFailure>, }, /// Opened a substream with the given node with the given notifications protocol. @@ -131,7 +127,7 @@ pub enum BehaviourOut { /// Node we opened the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, /// Object that permits sending notifications to the peer. notifications_sink: NotificationsSink, /// Role of the remote. @@ -147,7 +143,7 @@ pub enum BehaviourOut { /// Id of the peer we are connected to. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, /// Replacement for the previous [`NotificationsSink`]. notifications_sink: NotificationsSink, }, @@ -158,7 +154,7 @@ pub enum BehaviourOut { /// Node we closed the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, }, /// Received one or more messages from the given node using the given protocol. @@ -166,9 +162,15 @@ pub enum BehaviourOut { /// Node we received the message from. remote: PeerId, /// Concerned protocol and associated message. - messages: Vec<(ConsensusEngineId, Bytes)>, + messages: Vec<(Cow<'static, str>, Bytes)>, }, + /// Now connected to a new peer for syncing purposes. + SyncConnected(PeerId), + + /// No longer connected to a peer for syncing purposes. 
+ SyncDisconnected(PeerId), + /// Events generated by a DHT as a response to get_value or put_value requests as well as the /// request duration. Dht(DhtEvent, Duration), @@ -181,23 +183,32 @@ impl Behaviour { role: Role, user_agent: String, local_public_key: PublicKey, - block_requests: block_requests::BlockRequests, - finality_proof_requests: finality_requests::FinalityProofRequests, - light_client_handler: light_client_handler::LightClientHandler, + light_client_request_sender: light_client_requests::sender::LightClientRequestSender, disco_config: DiscoveryConfig, - request_response_protocols: Vec, + block_request_protocol_config: request_responses::ProtocolConfig, + bitswap: Option>, + light_client_request_protocol_config: request_responses::ProtocolConfig, + // All remaining request protocol configs. + mut request_response_protocols: Vec, ) -> Result { + // Extract protocol name and add to `request_response_protocols`. + let block_request_protocol_name = block_request_protocol_config.name.to_string(); + request_response_protocols.push(block_request_protocol_config); + + request_response_protocols.push(light_client_request_protocol_config); + Ok(Behaviour { substrate, peer_info: peer_info::PeerInfoBehaviour::new(user_agent, local_public_key), discovery: disco_config.finish(), + bitswap: bitswap.into(), request_responses: request_responses::RequestResponsesBehaviour::new(request_response_protocols.into_iter())?, - block_requests, - finality_proof_requests, - light_client_handler, + light_client_request_sender, events: VecDeque::new(), role, + + block_request_protocol_name, }) } @@ -239,41 +250,15 @@ impl Behaviour { } /// Initiates sending a request. - /// - /// An error is returned if we are not connected to the target peer of if the protocol doesn't - /// match one that has been registered. - pub fn send_request(&mut self, target: &PeerId, protocol: &str, request: Vec) - -> Result - { - self.request_responses.send_request(target, protocol, request) - } - - /// Registers a new notifications protocol. - /// - /// Please call `event_stream` before registering a protocol, otherwise you may miss events - /// about the protocol that you have registered. - /// - /// You are very strongly encouraged to call this method very early on. Any connection open - /// will retain the protocols that were registered then, and not any new one. - pub fn register_notifications_protocol( + pub fn send_request( &mut self, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, + target: &PeerId, + protocol: &str, + request: Vec, + pending_response: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, ) { - // This is the message that we will send to the remote as part of the initial handshake. - // At the moment, we force this to be an encoded `Roles`. - let handshake_message = Roles::from(&self.role).encode(); - - let list = self.substrate.register_notifications_protocol(engine_id, protocol_name, handshake_message); - for (remote, roles, notifications_sink) in list { - let role = reported_roles_to_observed_role(&self.role, remote, roles); - self.events.push_back(BehaviourOut::NotificationStreamOpened { - remote: remote.clone(), - engine_id, - role, - notifications_sink: notifications_sink.clone(), - }); - } + self.request_responses.send_request(target, protocol, request, pending_response, connect) } /// Returns a shared reference to the user protocol. @@ -297,8 +282,11 @@ impl Behaviour { } /// Issue a light client request. 
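With the request-response refactor above, a caller supplies a `oneshot` sender and receives the reply (or failure) on the corresponding receiver, rather than waiting for a `RequestFinished` event carrying the payload. A caller-side sketch of the new `send_request` shown above (`behaviour`, `target`, `proto` and `request_bytes` are placeholders):

```rust
use futures::channel::oneshot;

// Channel on which the response, or the failure, will be delivered.
let (tx, rx) = oneshot::channel();

// `IfDisconnected::ImmediateError` fails straight away if the peer is not
// connected, which is how block requests are issued elsewhere in this patch.
behaviour.send_request(&target, proto, request_bytes, tx, IfDisconnected::ImmediateError);

// Later, in an async context:
// match rx.await {
//     Ok(Ok(response)) => { /* decode and handle the response bytes */ }
//     Ok(Err(failure)) => { /* request failed: timeout, refused, ... */ }
//     Err(_) => { /* the behaviour dropped the sender */ }
// }
```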
- pub fn light_client_request(&mut self, r: light_client_handler::Request) -> Result<(), light_client_handler::Error> { - self.light_client_handler.request(r) + pub fn light_client_request( + &mut self, + r: light_client_requests::sender::Request, + ) -> Result<(), light_client_requests::sender::SendRequestError> { + self.light_client_request_sender.request(r) } } @@ -333,65 +321,54 @@ Behaviour { self.events.push_back(BehaviourOut::BlockImport(origin, blocks)), CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => self.events.push_back(BehaviourOut::JustificationImport(origin, hash, nb, justification)), - CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) => - self.events.push_back(BehaviourOut::FinalityProofImport(origin, hash, nb, proof)), - CustomMessageOutcome::BlockRequest { target, request } => { - match self.block_requests.send_request(&target, request) { - block_requests::SendRequestOutcome::Ok => { - self.events.push_back(BehaviourOut::OpaqueRequestStarted { - peer: target, - protocol: self.block_requests.protocol_name().to_owned(), - }); - }, - block_requests::SendRequestOutcome::Replaced { request_duration, .. } => { - self.events.push_back(BehaviourOut::OpaqueRequestFinished { - peer: target.clone(), - protocol: self.block_requests.protocol_name().to_owned(), - request_duration, - }); - self.events.push_back(BehaviourOut::OpaqueRequestStarted { - peer: target, - protocol: self.block_requests.protocol_name().to_owned(), - }); - } - block_requests::SendRequestOutcome::NotConnected | - block_requests::SendRequestOutcome::EncodeError(_) => {}, + CustomMessageOutcome::BlockRequest { target, request, pending_response } => { + let mut buf = Vec::with_capacity(request.encoded_len()); + if let Err(err) = request.encode(&mut buf) { + log::warn!( + target: "sync", + "Failed to encode block request {:?}: {:?}", + request, err + ); + return } + + self.request_responses.send_request( + &target, &self.block_request_protocol_name, buf, pending_response, IfDisconnected::ImmediateError, + ); }, - CustomMessageOutcome::FinalityProofRequest { target, block_hash, request } => { - self.finality_proof_requests.send_request(&target, block_hash, request); - }, - CustomMessageOutcome::NotificationStreamOpened { remote, protocols, roles, notifications_sink } => { + CustomMessageOutcome::NotificationStreamOpened { remote, protocol, roles, notifications_sink } => { let role = reported_roles_to_observed_role(&self.role, &remote, roles); - for engine_id in protocols { - self.events.push_back(BehaviourOut::NotificationStreamOpened { - remote: remote.clone(), - engine_id, - role: role.clone(), - notifications_sink: notifications_sink.clone(), - }); - } + self.events.push_back(BehaviourOut::NotificationStreamOpened { + remote, + protocol, + role: role.clone(), + notifications_sink: notifications_sink.clone(), + }); }, - CustomMessageOutcome::NotificationStreamReplaced { remote, protocols, notifications_sink } => - for engine_id in protocols { - self.events.push_back(BehaviourOut::NotificationStreamReplaced { - remote: remote.clone(), - engine_id, - notifications_sink: notifications_sink.clone(), - }); - }, - CustomMessageOutcome::NotificationStreamClosed { remote, protocols } => - for engine_id in protocols { - self.events.push_back(BehaviourOut::NotificationStreamClosed { - remote: remote.clone(), - engine_id, - }); - }, + CustomMessageOutcome::NotificationStreamReplaced { remote, protocol, notifications_sink } => + 
self.events.push_back(BehaviourOut::NotificationStreamReplaced { + remote, + protocol, + notifications_sink, + }), + CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => + self.events.push_back(BehaviourOut::NotificationStreamClosed { + remote, + protocol, + }), CustomMessageOutcome::NotificationsReceived { remote, messages } => { self.events.push_back(BehaviourOut::NotificationsReceived { remote, messages }); }, CustomMessageOutcome::PeerNewBest(peer_id, number) => { - self.light_client_handler.update_best_block(&peer_id, number); + self.light_client_request_sender.update_best_block(&peer_id, number); + } + CustomMessageOutcome::SyncConnected(peer_id) => { + self.light_client_request_sender.inject_connected(peer_id); + self.events.push_back(BehaviourOut::SyncConnected(peer_id)) + } + CustomMessageOutcome::SyncDisconnected(peer_id) => { + self.light_client_request_sender.inject_disconnected(peer_id); + self.events.push_back(BehaviourOut::SyncDisconnected(peer_id)) } CustomMessageOutcome::None => {} } @@ -408,66 +385,15 @@ impl NetworkBehaviourEventProcess { + request_responses::Event::RequestFinished { peer, protocol, duration, result } => { self.events.push_back(BehaviourOut::RequestFinished { - request_id, - result, - }); - }, - } - } -} - -impl NetworkBehaviourEventProcess> for Behaviour { - fn inject_event(&mut self, event: block_requests::Event) { - match event { - block_requests::Event::AnsweredRequest { peer, total_handling_time } => { - self.events.push_back(BehaviourOut::InboundRequest { - peer, - protocol: self.block_requests.protocol_name().to_owned().into(), - result: Ok(total_handling_time), + peer, protocol, duration, result, }); }, - block_requests::Event::Response { peer, response, request_duration } => { - self.events.push_back(BehaviourOut::OpaqueRequestFinished { - peer: peer.clone(), - protocol: self.block_requests.protocol_name().to_owned(), - request_duration, - }); - let ev = self.substrate.on_block_response(peer, response); - self.inject_event(ev); - } - block_requests::Event::RequestCancelled { peer, request_duration, .. } | - block_requests::Event::RequestTimeout { peer, request_duration, .. } => { - // There doesn't exist any mechanism to report cancellations or timeouts yet, so - // we process them by disconnecting the node. - self.events.push_back(BehaviourOut::OpaqueRequestFinished { - peer: peer.clone(), - protocol: self.block_requests.protocol_name().to_owned(), - request_duration, - }); - self.substrate.on_block_request_failed(&peer); - } - } - } -} - -impl NetworkBehaviourEventProcess> for Behaviour { - fn inject_event(&mut self, event: finality_requests::Event) { - match event { - finality_requests::Event::Response { peer, block_hash, proof } => { - let response = message::FinalityProofResponse { - id: 0, - block: block_hash, - proof: if !proof.is_empty() { - Some(proof) - } else { - None - }, - }; - let ev = self.substrate.on_finality_proof_response(peer, response); - self.inject_event(ev); + request_responses::Event::ReputationChanges { peer, changes } => { + for change in changes { + self.substrate.report_peer(peer, change); + } } } } @@ -499,7 +425,7 @@ impl NetworkBehaviourEventProcess NetworkBehaviourEventProcess // implementation for `PeerInfoEvent`. 
} DiscoveryOut::Discovered(peer_id) => { - self.substrate.add_discovered_nodes(iter::once(peer_id)); + self.substrate.add_default_set_discovered_nodes(iter::once(peer_id)); } DiscoveryOut::ValueFound(results, duration) => { self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueFound(results), duration)); @@ -538,7 +464,31 @@ impl NetworkBehaviourEventProcess } impl Behaviour { - fn poll(&mut self, _: &mut Context, _: &mut impl PollParameters) -> Poll>> { + fn poll( + &mut self, + cx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll>> { + use light_client_requests::sender::OutEvent; + while let Poll::Ready(Some(event)) = + self.light_client_request_sender.poll_next_unpin(cx) + { + match event { + OutEvent::SendRequest { + target, + request, + pending_response, + protocol_name, + } => self.request_responses.send_request( + &target, + &protocol_name, + request, + pending_response, + IfDisconnected::ImmediateError, + ), + } + } + if let Some(event) = self.events.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) } diff --git a/client/network/src/bitswap.rs b/client/network/src/bitswap.rs new file mode 100644 index 0000000000000..7129f3dbe07b1 --- /dev/null +++ b/client/network/src/bitswap.rs @@ -0,0 +1,338 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Bitswap server for substrate. +//! +//! Allows querying transactions by hash over standard bitswap protocol +//! Only supports bitswap 1.2.0. +//! CID is expected to reference 256-bit Blake2b transaction hash. + +use std::collections::VecDeque; +use std::io; +use std::sync::Arc; +use std::task::{Context, Poll}; +use cid::Version; +use codec::Encode; +use core::pin::Pin; +use futures::Future; +use futures::io::{AsyncRead, AsyncWrite}; +use libp2p::core::{ + connection::ConnectionId, Multiaddr, PeerId, + upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo, +}; +use libp2p::swarm::{ + NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters, + ProtocolsHandler, IntoProtocolsHandler, OneShotHandler, +}; +use log::{error, debug, trace}; +use prost::Message; +use sp_runtime::traits::{Block as BlockT}; +use unsigned_varint::{encode as varint_encode}; +use crate::chain::Client; +use crate::schema::bitswap::{ + Message as BitswapMessage, + message::{wantlist::WantType, Block as MessageBlock, BlockPresenceType, BlockPresence}, +}; + +const LOG_TARGET: &str = "bitswap"; + +// Undocumented, but according to JS the bitswap messages have a max size of 512*1024 bytes +// https://github.com/ipfs/js-ipfs-bitswap/blob/ +// d8f80408aadab94c962f6b88f343eb9f39fa0fcc/src/decision-engine/index.js#L16 +// We set it to the same value as max substrate protocol message +const MAX_PACKET_SIZE: usize = 16 * 1024 * 1024; + +// Max number of queued responses before denying requests. 
+const MAX_RESPONSE_QUEUE: usize = 20; +// Max number of blocks per wantlist +const MAX_WANTED_BLOCKS: usize = 16; + +const PROTOCOL_NAME: &'static [u8] = b"/ipfs/bitswap/1.2.0"; + +type FutureResult = Pin> + Send>>; + +/// Bitswap protocol config +#[derive(Clone, Copy, Debug, Default)] +pub struct BitswapConfig; + +impl UpgradeInfo for BitswapConfig { + type Info = &'static [u8]; + type InfoIter = std::iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + std::iter::once(PROTOCOL_NAME) + } +} + +impl InboundUpgrade for BitswapConfig +where + TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, +{ + type Output = BitswapMessage; + type Error = BitswapError; + type Future = FutureResult; + + fn upgrade_inbound(self, mut socket: TSocket, _info: Self::Info) -> Self::Future { + Box::pin(async move { + let packet = upgrade::read_one(&mut socket, MAX_PACKET_SIZE).await?; + let message: BitswapMessage = Message::decode(packet.as_slice())?; + Ok(message) + }) + } +} + +impl UpgradeInfo for BitswapMessage { + type Info = &'static [u8]; + type InfoIter = std::iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + std::iter::once(PROTOCOL_NAME) + } +} + +impl OutboundUpgrade for BitswapMessage +where + TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, +{ + type Output = (); + type Error = io::Error; + type Future = FutureResult; + + fn upgrade_outbound(self, mut socket: TSocket, _info: Self::Info) -> Self::Future { + Box::pin(async move { + let mut data = Vec::with_capacity(self.encoded_len()); + self.encode(&mut data)?; + upgrade::write_one(&mut socket, data).await + }) + } +} + +/// Internal protocol handler event. +#[derive(Debug)] +pub enum HandlerEvent { + /// We received a `BitswapMessage` from a remote. + Request(BitswapMessage), + /// We successfully sent a `BitswapMessage`. + ResponseSent, +} + +impl From for HandlerEvent { + fn from(message: BitswapMessage) -> Self { + Self::Request(message) + } +} + +impl From<()> for HandlerEvent { + fn from(_: ()) -> Self { + Self::ResponseSent + } +} + +/// Prefix represents all metadata of a CID, without the actual content. +#[derive(PartialEq, Eq, Clone, Debug)] +struct Prefix { + /// The version of CID. + pub version: Version, + /// The codec of CID. + pub codec: u64, + /// The multihash type of CID. + pub mh_type: u64, + /// The multihash length of CID. + pub mh_len: u8, +} + +impl Prefix { + /// Convert the prefix to encoded bytes. + pub fn to_bytes(&self) -> Vec { + let mut res = Vec::with_capacity(4); + let mut buf = varint_encode::u64_buffer(); + let version = varint_encode::u64(self.version.into(), &mut buf); + res.extend_from_slice(version); + let mut buf = varint_encode::u64_buffer(); + let codec = varint_encode::u64(self.codec.into(), &mut buf); + res.extend_from_slice(codec); + let mut buf = varint_encode::u64_buffer(); + let mh_type = varint_encode::u64(self.mh_type.into(), &mut buf); + res.extend_from_slice(mh_type); + let mut buf = varint_encode::u64_buffer(); + let mh_len = varint_encode::u64(self.mh_len as u64, &mut buf); + res.extend_from_slice(mh_len); + res + } +} + +/// Network behaviour that handles sending and receiving IPFS blocks. +pub struct Bitswap { + client: Arc>, + ready_blocks: VecDeque<(PeerId, BitswapMessage)>, +} + +impl Bitswap { + /// Create a new instance of the bitswap protocol handler. 
+ pub fn new(client: Arc>) -> Self { + Bitswap { + client, + ready_blocks: Default::default(), + } + } +} + +impl NetworkBehaviour for Bitswap { + type ProtocolsHandler = OneShotHandler; + type OutEvent = void::Void; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + Default::default() + } + + fn addresses_of_peer(&mut self, _peer: &PeerId) -> Vec { + Vec::new() + } + + fn inject_connected(&mut self, _peer: &PeerId) { + } + + fn inject_disconnected(&mut self, _peer: &PeerId) { + } + + fn inject_event(&mut self, peer: PeerId, _connection: ConnectionId, message: HandlerEvent) { + let request = match message { + HandlerEvent::ResponseSent => return, + HandlerEvent::Request(msg) => msg, + }; + trace!(target: LOG_TARGET, "Received request: {:?} from {}", request, peer); + if self.ready_blocks.len() > MAX_RESPONSE_QUEUE { + debug!(target: LOG_TARGET, "Ignored request: queue is full"); + return; + } + let mut response = BitswapMessage { + wantlist: None, + blocks: Default::default(), + payload: Default::default(), + block_presences: Default::default(), + pending_bytes: 0, + }; + let wantlist = match request.wantlist { + Some(wantlist) => wantlist, + None => { + debug!( + target: LOG_TARGET, + "Unexpected bitswap message from {}", + peer, + ); + return; + } + }; + if wantlist.entries.len() > MAX_WANTED_BLOCKS { + trace!(target: LOG_TARGET, "Ignored request: too many entries"); + return; + } + for entry in wantlist.entries { + let cid = match cid::Cid::read_bytes(entry.block.as_slice()) { + Ok(cid) => cid, + Err(e) => { + trace!(target: LOG_TARGET, "Bad CID {:?}: {:?}", entry.block, e); + continue; + } + }; + if cid.version() != cid::Version::V1 + || cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) + || cid.hash().size() != 32 + { + debug!(target: LOG_TARGET, "Ignoring unsupported CID {}: {}", peer, cid); + continue + } + let mut hash = B::Hash::default(); + hash.as_mut().copy_from_slice(&cid.hash().digest()[0..32]); + let extrinsic = match self.client.extrinsic(&hash) { + Ok(ex) => ex, + Err(e) => { + error!(target: LOG_TARGET, "Error retrieving extrinsic {}: {}", hash, e); + None + } + }; + match extrinsic { + Some(extrinsic) => { + trace!(target: LOG_TARGET, "Found CID {:?}, hash {:?}", cid, hash); + if entry.want_type == WantType::Block as i32 { + let prefix = Prefix { + version: cid.version(), + codec: cid.codec(), + mh_type: cid.hash().code(), + mh_len: cid.hash().size(), + }; + response.payload.push(MessageBlock { + prefix: prefix.to_bytes(), + data: extrinsic.encode(), + }); + } else { + response.block_presences.push(BlockPresence { + r#type: BlockPresenceType::Have as i32, + cid: cid.to_bytes(), + }); + } + }, + None => { + trace!(target: LOG_TARGET, "Missing CID {:?}, hash {:?}", cid, hash); + if entry.send_dont_have { + response.block_presences.push(BlockPresence { + r#type: BlockPresenceType::DontHave as i32, + cid: cid.to_bytes(), + }); + } + } + } + } + trace!(target: LOG_TARGET, "Response: {:?}", response); + self.ready_blocks.push_back((peer, response)); + } + + fn poll(&mut self, _ctx: &mut Context, _: &mut impl PollParameters) -> Poll< + NetworkBehaviourAction< + <::Handler as ProtocolsHandler>::InEvent, + Self::OutEvent, + >, + > { + if let Some((peer_id, message)) = self.ready_blocks.pop_front() { + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::Any, + event: message, + }) + } + Poll::Pending + } +} + +/// Bitswap protocol error. 
+#[derive(derive_more::Display, derive_more::From)] +pub enum BitswapError { + /// Protobuf decoding error. + #[display(fmt = "Failed to decode request: {}.", _0)] + DecodeProto(prost::DecodeError), + /// Protobuf encoding error. + #[display(fmt = "Failed to encode response: {}.", _0)] + EncodeProto(prost::EncodeError), + /// Client backend error. + Client(sp_blockchain::Error), + /// Error parsing CID + BadCid(cid::Error), + /// Packet read error. + Read(upgrade::ReadOneError), + /// Error sending response. + #[display(fmt = "Failed to send response.")] + SendResponse, +} diff --git a/client/network/src/block_request_handler.rs b/client/network/src/block_request_handler.rs new file mode 100644 index 0000000000000..92f21f44f9d1c --- /dev/null +++ b/client/network/src/block_request_handler.rs @@ -0,0 +1,225 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Helper for handling (i.e. answering) block requests from a remote peer via the +//! [`crate::request_responses::RequestResponsesBehaviour`]. + +use codec::{Encode, Decode}; +use crate::chain::Client; +use crate::config::ProtocolId; +use crate::protocol::{message::BlockAttributes}; +use crate::request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}; +use crate::schema::v1::block_request::FromBlock; +use crate::schema::v1::{BlockResponse, Direction}; +use futures::channel::{mpsc, oneshot}; +use futures::stream::StreamExt; +use log::debug; +use prost::Message; +use sp_runtime::generic::BlockId; +use sp_runtime::traits::{Block as BlockT, Header, One, Zero}; +use std::cmp::min; +use std::sync::{Arc}; +use std::time::Duration; + +const LOG_TARGET: &str = "block-request-handler"; +const MAX_BLOCKS_IN_RESPONSE: usize = 128; +const MAX_BODY_BYTES: usize = 8 * 1024 * 1024; + +/// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests. +pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { + ProtocolConfig { + name: generate_protocol_name(protocol_id).into(), + max_request_size: 1024 * 1024, + max_response_size: 16 * 1024 * 1024, + request_timeout: Duration::from_secs(40), + inbound_queue: None, + } +} + +/// Generate the block protocol name from chain specific protocol identifier. +// +// Visibility `pub(crate)` to allow `crate::light_client_requests::sender` to generate block request +// protocol name and send block requests. +pub(crate) fn generate_protocol_name(protocol_id: &ProtocolId) -> String { + let mut s = String::new(); + s.push_str("/"); + s.push_str(protocol_id.as_ref()); + s.push_str("/sync/2"); + s +} + +/// Handler for incoming block requests from a remote peer. +pub struct BlockRequestHandler { + client: Arc>, + request_receiver: mpsc::Receiver, +} + +impl BlockRequestHandler { + /// Create a new [`BlockRequestHandler`]. 
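For illustration, a chain whose `ProtocolId` renders as "dot" (a hypothetical value) would get the protocol name `/dot/sync/2` from the helper above; a minimal test sketch pinning that format:

#[test]
fn block_request_protocol_name_format() {
    let protocol_id = ProtocolId::from("dot"); // placeholder chain identifier
    assert_eq!(generate_protocol_name(&protocol_id), "/dot/sync/2");
}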
+ pub fn new(protocol_id: &ProtocolId, client: Arc>) -> (Self, ProtocolConfig) { + // Rate of arrival multiplied with the waiting time in the queue equals the queue length. + // + // An average Polkadot sentry node serves less than 5 requests per second. The 95th percentile + // serving a request is less than 2 second. Thus one would estimate the queue length to be + // below 10. + // + // Choosing 20 as the queue length to give some additional buffer. + let (tx, request_receiver) = mpsc::channel(20); + + let mut protocol_config = generate_protocol_config(protocol_id); + protocol_config.inbound_queue = Some(tx); + + (Self { client, request_receiver }, protocol_config) + } + + /// Run [`BlockRequestHandler`]. + pub async fn run(mut self) { + while let Some(request) = self.request_receiver.next().await { + let IncomingRequest { peer, payload, pending_response } = request; + + match self.handle_request(payload, pending_response) { + Ok(()) => debug!(target: LOG_TARGET, "Handled block request from {}.", peer), + Err(e) => debug!( + target: LOG_TARGET, + "Failed to handle block request from {}: {}", + peer, e, + ), + } + } + } + + fn handle_request( + &self, + payload: Vec, + pending_response: oneshot::Sender + ) -> Result<(), HandleRequestError> { + let request = crate::schema::v1::BlockRequest::decode(&payload[..])?; + + let from_block_id = match request.from_block.ok_or(HandleRequestError::MissingFromField)? { + FromBlock::Hash(ref h) => { + let h = Decode::decode(&mut h.as_ref())?; + BlockId::::Hash(h) + } + FromBlock::Number(ref n) => { + let n = Decode::decode(&mut n.as_ref())?; + BlockId::::Number(n) + } + }; + + let max_blocks = if request.max_blocks == 0 { + MAX_BLOCKS_IN_RESPONSE + } else { + min(request.max_blocks as usize, MAX_BLOCKS_IN_RESPONSE) + }; + + let direction = Direction::from_i32(request.direction) + .ok_or(HandleRequestError::ParseDirection)?; + let attributes = BlockAttributes::from_be_u32(request.fields)?; + let get_header = attributes.contains(BlockAttributes::HEADER); + let get_body = attributes.contains(BlockAttributes::BODY); + let get_justification = attributes.contains(BlockAttributes::JUSTIFICATION); + + let mut blocks = Vec::new(); + let mut block_id = from_block_id; + + let mut total_size: usize = 0; + while let Some(header) = self.client.header(block_id).unwrap_or(None) { + let number = *header.number(); + let hash = header.hash(); + let parent_hash = *header.parent_hash(); + let justification = if get_justification { + self.client.justification(&BlockId::Hash(hash))? + } else { + None + }; + let is_empty_justification = justification.as_ref().map(|j| j.is_empty()).unwrap_or(false); + + let body = if get_body { + match self.client.block_body(&BlockId::Hash(hash))? 
{ + Some(mut extrinsics) => extrinsics.iter_mut() + .map(|extrinsic| extrinsic.encode()) + .collect(), + None => { + log::trace!(target: "sync", "Missing data for block request."); + break; + } + } + } else { + Vec::new() + }; + + let block_data = crate::schema::v1::BlockData { + hash: hash.encode(), + header: if get_header { + header.encode() + } else { + Vec::new() + }, + body, + receipt: Vec::new(), + message_queue: Vec::new(), + justification: justification.unwrap_or_default(), + is_empty_justification, + }; + + total_size += block_data.body.len(); + blocks.push(block_data); + + if blocks.len() >= max_blocks as usize || total_size > MAX_BODY_BYTES { + break + } + + match direction { + Direction::Ascending => { + block_id = BlockId::Number(number + One::one()) + } + Direction::Descending => { + if number.is_zero() { + break + } + block_id = BlockId::Hash(parent_hash) + } + } + } + + let res = BlockResponse { blocks }; + + let mut data = Vec::with_capacity(res.encoded_len()); + res.encode(&mut data)?; + + pending_response.send(OutgoingResponse { + result: Ok(data), + reputation_changes: Vec::new(), + }).map_err(|_| HandleRequestError::SendResponse) + } +} + +#[derive(derive_more::Display, derive_more::From)] +enum HandleRequestError { + #[display(fmt = "Failed to decode request: {}.", _0)] + DecodeProto(prost::DecodeError), + #[display(fmt = "Failed to encode response: {}.", _0)] + EncodeProto(prost::EncodeError), + #[display(fmt = "Failed to decode block hash: {}.", _0)] + DecodeScale(codec::Error), + #[display(fmt = "Missing `BlockRequest::from_block` field.")] + MissingFromField, + #[display(fmt = "Failed to parse BlockRequest::direction.")] + ParseDirection, + Client(sp_blockchain::Error), + #[display(fmt = "Failed to send response.")] + SendResponse, +} diff --git a/client/network/src/block_requests.rs b/client/network/src/block_requests.rs deleted file mode 100644 index ace63e6e1cdd4..0000000000000 --- a/client/network/src/block_requests.rs +++ /dev/null @@ -1,857 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. -// -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! `NetworkBehaviour` implementation which handles incoming block requests. -//! -//! Every request is coming in on a separate connection substream which gets -//! closed after we have sent the response back. Incoming requests are encoded -//! as protocol buffers (cf. `api.v1.proto`). 
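For context on what `handle_request` decodes, here is a hedged sketch of the request a peer might encode for this protocol, using only field names visible in this file; `start_hash` is a placeholder and the remaining protobuf fields are left at their prost defaults:

// Ask for up to 64 blocks ascending from `start_hash`, headers and bodies only.
let request = crate::schema::v1::BlockRequest {
    fields: (BlockAttributes::HEADER | BlockAttributes::BODY).to_be_u32(),
    from_block: Some(FromBlock::Hash(start_hash.encode())),
    direction: Direction::Ascending as i32,
    max_blocks: 64,
    ..Default::default()
};
let mut buf = Vec::with_capacity(request.encoded_len());
request.encode(&mut buf).expect("Vec<u8> never runs out of capacity");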
- -#![allow(unused)] - -use bytes::Bytes; -use codec::{Encode, Decode}; -use crate::{ - chain::Client, - config::ProtocolId, - protocol::{message::{self, BlockAttributes}}, - schema, -}; -use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use futures_timer::Delay; -use libp2p::{ - core::{ - ConnectedPoint, - Multiaddr, - PeerId, - connection::ConnectionId, - upgrade::{InboundUpgrade, OutboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, - upgrade::{DeniedUpgrade, read_one, write_one} - }, - swarm::{ - NegotiatedSubstream, - NetworkBehaviour, - NetworkBehaviourAction, - NotifyHandler, - OneShotHandler, - OneShotHandlerConfig, - PollParameters, - SubstreamProtocol - } -}; -use prost::Message; -use sp_runtime::{generic::BlockId, traits::{Block, Header, One, Zero}}; -use std::{ - cmp::min, - collections::{HashMap, VecDeque}, - io, - iter, - marker::PhantomData, - pin::Pin, - sync::Arc, - time::Duration, - task::{Context, Poll} -}; -use void::{Void, unreachable}; -use wasm_timer::Instant; - -// Type alias for convenience. -pub type Error = Box; - -/// Event generated by the block requests behaviour. -#[derive(Debug)] -pub enum Event { - /// A request came and we have successfully answered it. - AnsweredRequest { - /// Peer which has emitted the request. - peer: PeerId, - /// Time elapsed between when we received the request and when we sent back the response. - total_handling_time: Duration, - }, - - /// A response to a block request has arrived. - Response { - peer: PeerId, - response: message::BlockResponse, - /// Time elapsed between the start of the request and the response. - request_duration: Duration, - }, - - /// A request has been cancelled because the peer has disconnected. - /// Disconnects can also happen as a result of violating the network protocol. - /// - /// > **Note**: This event is NOT emitted if a request is overridden by calling `send_request`. - /// > For that, you must check the value returned by `send_request`. - RequestCancelled { - peer: PeerId, - /// Time elapsed between the start of the request and the cancellation. - request_duration: Duration, - }, - - /// A request has timed out. - RequestTimeout { - peer: PeerId, - /// Time elapsed between the start of the request and the timeout. - request_duration: Duration, - } -} - -/// Configuration options for `BlockRequests`. -#[derive(Debug, Clone)] -pub struct Config { - max_block_data_response: u32, - max_block_body_bytes: usize, - max_request_len: usize, - max_response_len: usize, - inactivity_timeout: Duration, - request_timeout: Duration, - protocol: String, -} - -impl Config { - /// Create a fresh configuration with the following options: - /// - /// - max. block data in response = 128 - /// - max. request size = 1 MiB - /// - max. response size = 16 MiB - /// - inactivity timeout = 15s - /// - request timeout = 40s - pub fn new(id: &ProtocolId) -> Self { - let mut c = Config { - max_block_data_response: 128, - max_block_body_bytes: 8 * 1024 * 1024, - max_request_len: 1024 * 1024, - max_response_len: 16 * 1024 * 1024, - inactivity_timeout: Duration::from_secs(15), - request_timeout: Duration::from_secs(40), - protocol: String::new(), - }; - c.set_protocol(id); - c - } - - /// Limit the max. number of block data in a response. - pub fn set_max_block_data_response(&mut self, v: u32) -> &mut Self { - self.max_block_data_response = v; - self - } - - /// Limit the max. length of incoming block request bytes. 
- pub fn set_max_request_len(&mut self, v: usize) -> &mut Self { - self.max_request_len = v; - self - } - - /// Limit the max. size of responses to our block requests. - pub fn set_max_response_len(&mut self, v: usize) -> &mut Self { - self.max_response_len = v; - self - } - - /// Limit the max. duration the substream may remain inactive before closing it. - pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { - self.inactivity_timeout = v; - self - } - - /// Set the maximum total bytes of block bodies that are send in the response. - /// Note that at least one block is always sent regardless of the limit. - /// This should be lower than the value specified in `set_max_response_len` - /// accounting for headers, justifications and encoding overhead. - pub fn set_max_block_body_bytes(&mut self, v: usize) -> &mut Self { - self.max_block_body_bytes = v; - self - } - - /// Set protocol to use for upgrade negotiation. - pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { - let mut s = String::new(); - s.push_str("/"); - s.push_str(id.as_ref()); - s.push_str("/sync/2"); - self.protocol = s; - self - } -} - -/// The block request handling behaviour. -pub struct BlockRequests { - /// This behaviour's configuration. - config: Config, - /// Blockchain client. - chain: Arc>, - /// List of all active connections and the requests we've sent. - peers: HashMap>>, - /// Futures sending back the block request response. Returns the `PeerId` we sent back to, and - /// the total time the handling of this request took. - outgoing: FuturesUnordered>, - /// Events to return as soon as possible from `poll`. - pending_events: VecDeque, Event>>, -} - -/// Local tracking of a libp2p connection. -#[derive(Debug)] -struct Connection { - id: ConnectionId, - ongoing_request: Option>, -} - -#[derive(Debug)] -struct OngoingRequest { - /// `Instant` when the request has been emitted. Used for diagnostic purposes. - emitted: Instant, - request: message::BlockRequest, - timeout: Delay, -} - -/// Outcome of calling `send_request`. -#[derive(Debug)] -#[must_use] -pub enum SendRequestOutcome { - /// Request has been emitted. - Ok, - /// The request has been emitted and has replaced an existing request. - Replaced { - /// The previously-emitted request. - previous: message::BlockRequest, - /// Time that had elapsed since `previous` has been emitted. - request_duration: Duration, - }, - /// Didn't start a request because we have no connection to this node. - /// If `send_request` returns that, it is as if the function had never been called. - NotConnected, - /// Error while serializing the request. - EncodeError(prost::EncodeError), -} - -impl BlockRequests -where - B: Block, -{ - pub fn new(cfg: Config, chain: Arc>) -> Self { - BlockRequests { - config: cfg, - chain, - peers: HashMap::new(), - outgoing: FuturesUnordered::new(), - pending_events: VecDeque::new(), - } - } - - /// Returns the libp2p protocol name used on the wire (e.g. `/foo/sync/2`). - pub fn protocol_name(&self) -> &str { - &self.config.protocol - } - - /// Issue a new block request. - /// - /// Cancels any existing request targeting the same `PeerId`. - /// - /// If the response doesn't arrive in time, or if the remote answers improperly, the target - /// will be disconnected. - pub fn send_request(&mut self, target: &PeerId, req: message::BlockRequest) -> SendRequestOutcome { - // Determine which connection to send the request to. 
- let connection = if let Some(peer) = self.peers.get_mut(target) { - // We don't want to have multiple requests for any given node, so in priority try to - // find a connection with an existing request, to override it. - if let Some(entry) = peer.iter_mut().find(|c| c.ongoing_request.is_some()) { - entry - } else if let Some(entry) = peer.get_mut(0) { - entry - } else { - log::error!( - target: "sync", - "State inconsistency: empty list of peer connections" - ); - return SendRequestOutcome::NotConnected; - } - } else { - return SendRequestOutcome::NotConnected; - }; - - let protobuf_rq = build_protobuf_block_request( - req.fields, - req.from.clone(), - req.to.clone(), - req.direction, - req.max, - ); - - let mut buf = Vec::with_capacity(protobuf_rq.encoded_len()); - if let Err(err) = protobuf_rq.encode(&mut buf) { - log::warn!( - target: "sync", - "Failed to encode block request {:?}: {:?}", - protobuf_rq, - err - ); - return SendRequestOutcome::EncodeError(err); - } - - let previous_request = connection.ongoing_request.take(); - connection.ongoing_request = Some(OngoingRequest { - emitted: Instant::now(), - request: req.clone(), - timeout: Delay::new(self.config.request_timeout), - }); - - log::trace!(target: "sync", "Enqueueing block request to {:?}: {:?}", target, protobuf_rq); - self.pending_events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: target.clone(), - handler: NotifyHandler::One(connection.id), - event: OutboundProtocol { - request: buf, - original_request: req, - max_response_size: self.config.max_response_len, - protocol: self.config.protocol.as_bytes().to_vec().into(), - }, - }); - - if let Some(previous_request) = previous_request { - log::debug!( - target: "sync", - "Replacing existing block request on connection {:?}", - connection.id - ); - SendRequestOutcome::Replaced { - previous: previous_request.request, - request_duration: previous_request.emitted.elapsed(), - } - } else { - SendRequestOutcome::Ok - } - } - - /// Callback, invoked when a new block request has been received from remote. 
- fn on_block_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::BlockRequest - ) -> Result - { - log::trace!( - target: "sync", - "Block request from peer {}: from block {:?} to block {:?}, max blocks {:?}", - peer, - request.from_block, - request.to_block, - request.max_blocks); - - let from_block_id = - match request.from_block { - Some(schema::v1::block_request::FromBlock::Hash(ref h)) => { - let h = Decode::decode(&mut h.as_ref())?; - BlockId::::Hash(h) - } - Some(schema::v1::block_request::FromBlock::Number(ref n)) => { - let n = Decode::decode(&mut n.as_ref())?; - BlockId::::Number(n) - } - None => { - let msg = "missing `BlockRequest::from_block` field"; - return Err(io::Error::new(io::ErrorKind::Other, msg).into()) - } - }; - - let max_blocks = - if request.max_blocks == 0 { - self.config.max_block_data_response - } else { - min(request.max_blocks, self.config.max_block_data_response) - }; - - let direction = - if request.direction == schema::v1::Direction::Ascending as i32 { - schema::v1::Direction::Ascending - } else if request.direction == schema::v1::Direction::Descending as i32 { - schema::v1::Direction::Descending - } else { - let msg = format!("invalid `BlockRequest::direction` value: {}", request.direction); - return Err(io::Error::new(io::ErrorKind::Other, msg).into()) - }; - - let attributes = BlockAttributes::from_be_u32(request.fields)?; - let get_header = attributes.contains(BlockAttributes::HEADER); - let get_body = attributes.contains(BlockAttributes::BODY); - let get_justification = attributes.contains(BlockAttributes::JUSTIFICATION); - - let mut blocks = Vec::new(); - let mut block_id = from_block_id; - let mut total_size = 0; - while let Some(header) = self.chain.header(block_id).unwrap_or(None) { - if blocks.len() >= max_blocks as usize - || (blocks.len() >= 1 && total_size > self.config.max_block_body_bytes) - { - break - } - - let number = *header.number(); - let hash = header.hash(); - let parent_hash = *header.parent_hash(); - let justification = if get_justification { - self.chain.justification(&BlockId::Hash(hash))? - } else { - None - }; - let is_empty_justification = justification.as_ref().map(|j| j.is_empty()).unwrap_or(false); - - let body = if get_body { - match self.chain.block_body(&BlockId::Hash(hash))? 
{ - Some(mut extrinsics) => extrinsics.iter_mut() - .map(|extrinsic| extrinsic.encode()) - .collect(), - None => { - log::trace!(target: "sync", "Missing data for block request."); - break; - } - } - } else { - Vec::new() - }; - - let block_data = schema::v1::BlockData { - hash: hash.encode(), - header: if get_header { - header.encode() - } else { - Vec::new() - }, - body, - receipt: Vec::new(), - message_queue: Vec::new(), - justification: justification.unwrap_or_default(), - is_empty_justification, - }; - - total_size += block_data.body.len(); - blocks.push(block_data); - - match direction { - schema::v1::Direction::Ascending => { - block_id = BlockId::Number(number + One::one()) - } - schema::v1::Direction::Descending => { - if number.is_zero() { - break - } - block_id = BlockId::Hash(parent_hash) - } - } - } - - Ok(schema::v1::BlockResponse { blocks }) - } -} - -impl NetworkBehaviour for BlockRequests -where - B: Block -{ - type ProtocolsHandler = OneShotHandler, OutboundProtocol, NodeEvent>; - type OutEvent = Event; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - let p = InboundProtocol { - max_request_len: self.config.max_request_len, - protocol: self.config.protocol.as_bytes().to_owned().into(), - marker: PhantomData, - }; - let mut cfg = OneShotHandlerConfig::default(); - cfg.keep_alive_timeout = self.config.inactivity_timeout; - cfg.outbound_substream_timeout = self.config.request_timeout; - OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) - } - - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { - Vec::new() - } - - fn inject_connected(&mut self, _peer: &PeerId) { - } - - fn inject_disconnected(&mut self, _peer: &PeerId) { - } - - fn inject_connection_established(&mut self, peer_id: &PeerId, id: &ConnectionId, _: &ConnectedPoint) { - self.peers.entry(peer_id.clone()) - .or_default() - .push(Connection { - id: *id, - ongoing_request: None, - }); - } - - fn inject_connection_closed(&mut self, peer_id: &PeerId, id: &ConnectionId, _: &ConnectedPoint) { - let mut needs_remove = false; - if let Some(entry) = self.peers.get_mut(peer_id) { - if let Some(pos) = entry.iter().position(|i| i.id == *id) { - let ongoing_request = entry.remove(pos).ongoing_request; - if let Some(ongoing_request) = ongoing_request { - log::debug!( - target: "sync", - "Connection {:?} with {} closed with ongoing sync request: {:?}", - id, - peer_id, - ongoing_request - ); - let ev = Event::RequestCancelled { - peer: peer_id.clone(), - request_duration: ongoing_request.emitted.elapsed(), - }; - self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(ev)); - } - if entry.is_empty() { - needs_remove = true; - } - } else { - log::error!( - target: "sync", - "State inconsistency: connection id not found in list" - ); - } - } else { - log::error!( - target: "sync", - "State inconsistency: peer_id not found in list of connections" - ); - } - if needs_remove { - self.peers.remove(peer_id); - } - } - - fn inject_event( - &mut self, - peer: PeerId, - connection_id: ConnectionId, - node_event: NodeEvent - ) { - match node_event { - NodeEvent::Request(request, mut stream, handling_start) => { - match self.on_block_request(&peer, &request) { - Ok(res) => { - log::trace!( - target: "sync", - "Enqueueing block response for peer {} with {} blocks", - peer, res.blocks.len() - ); - let mut data = Vec::with_capacity(res.encoded_len()); - if let Err(e) = res.encode(&mut data) { - log::debug!( - target: "sync", - "Error encoding block response for peer {}: {}", - peer, e - ) - } else { - 
self.outgoing.push(async move { - if let Err(e) = write_one(&mut stream, data).await { - log::debug!( - target: "sync", - "Error writing block response: {}", - e - ); - } - (peer, handling_start.elapsed()) - }.boxed()); - } - } - Err(e) => log::debug!( - target: "sync", - "Error handling block request from peer {}: {}", peer, e - ) - } - } - NodeEvent::Response(original_request, response) => { - log::trace!( - target: "sync", - "Received block response from peer {} with {} blocks", - peer, response.blocks.len() - ); - let request_duration = if let Some(connections) = self.peers.get_mut(&peer) { - if let Some(connection) = connections.iter_mut().find(|c| c.id == connection_id) { - if let Some(ongoing_request) = &mut connection.ongoing_request { - if ongoing_request.request == original_request { - let request_duration = ongoing_request.emitted.elapsed(); - connection.ongoing_request = None; - request_duration - } else { - // We're no longer interested in that request. - log::debug!( - target: "sync", - "Received response from {} to obsolete block request {:?}", - peer, - original_request - ); - return; - } - } else { - // We remove from `self.peers` requests we're no longer interested in, - // so this can legitimately happen. - log::trace!( - target: "sync", - "Response discarded because it concerns an obsolete request" - ); - return; - } - } else { - log::error!( - target: "sync", - "State inconsistency: response on non-existing connection {:?}", - connection_id - ); - return; - } - } else { - log::error!( - target: "sync", - "State inconsistency: response on non-connected peer {}", - peer - ); - return; - }; - - let blocks = response.blocks.into_iter().map(|block_data| { - Ok(message::BlockData:: { - hash: Decode::decode(&mut block_data.hash.as_ref())?, - header: if !block_data.header.is_empty() { - Some(Decode::decode(&mut block_data.header.as_ref())?) - } else { - None - }, - body: if original_request.fields.contains(message::BlockAttributes::BODY) { - Some(block_data.body.iter().map(|body| { - Decode::decode(&mut body.as_ref()) - }).collect::, _>>()?) - } else { - None - }, - receipt: if !block_data.message_queue.is_empty() { - Some(block_data.receipt) - } else { - None - }, - message_queue: if !block_data.message_queue.is_empty() { - Some(block_data.message_queue) - } else { - None - }, - justification: if !block_data.justification.is_empty() { - Some(block_data.justification) - } else if block_data.is_empty_justification { - Some(Vec::new()) - } else { - None - }, - }) - }).collect::, codec::Error>>(); - - match blocks { - Ok(blocks) => { - let id = original_request.id; - let ev = Event::Response { - peer, - response: message::BlockResponse:: { id, blocks }, - request_duration, - }; - self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(ev)); - } - Err(err) => { - log::debug!( - target: "sync", - "Failed to decode block response from peer {}: {}", peer, err - ); - } - } - } - } - } - - fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) - -> Poll, Event>> - { - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(ev); - } - - // Check the request timeouts. 
- for (peer, connections) in &mut self.peers { - for connection in connections { - let ongoing_request = match &mut connection.ongoing_request { - Some(rq) => rq, - None => continue, - }; - - if let Poll::Ready(_) = Pin::new(&mut ongoing_request.timeout).poll(cx) { - let original_request = ongoing_request.request.clone(); - let request_duration = ongoing_request.emitted.elapsed(); - connection.ongoing_request = None; - log::debug!( - target: "sync", - "Request timeout for {}: {:?}", - peer, original_request - ); - let ev = Event::RequestTimeout { - peer: peer.clone(), - request_duration, - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - } - } - - if let Poll::Ready(Some((peer, total_handling_time))) = self.outgoing.poll_next_unpin(cx) { - let ev = Event::AnsweredRequest { - peer, - total_handling_time, - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - - Poll::Pending - } -} - -/// Output type of inbound and outbound substream upgrades. -#[derive(Debug)] -pub enum NodeEvent { - /// Incoming request from remote, substream to use for the response, and when we started - /// handling this request. - Request(schema::v1::BlockRequest, T, Instant), - /// Incoming response from remote. - Response(message::BlockRequest, schema::v1::BlockResponse), -} - -/// Substream upgrade protocol. -/// -/// We attempt to parse an incoming protobuf encoded request (cf. `Request`) -/// which will be handled by the `BlockRequests` behaviour, i.e. the request -/// will become visible via `inject_node_event` which then dispatches to the -/// relevant callback to process the message and prepare a response. -#[derive(Debug, Clone)] -pub struct InboundProtocol { - /// The max. request length in bytes. - max_request_len: usize, - /// The protocol to use during upgrade negotiation. - protocol: Bytes, - /// Type of the block. - marker: PhantomData, -} - -impl UpgradeInfo for InboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } -} - -impl InboundUpgrade for InboundProtocol -where - B: Block, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = NodeEvent; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { - // This `Instant` will be passed around until the processing of this request is done. - let handling_start = Instant::now(); - - let future = async move { - let len = self.max_request_len; - let vec = read_one(&mut s, len).await?; - match schema::v1::BlockRequest::decode(&vec[..]) { - Ok(r) => Ok(NodeEvent::Request(r, s, handling_start)), - Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) - } - }; - future.boxed() - } -} - -/// Substream upgrade protocol. -/// -/// Sends a request to remote and awaits the response. -#[derive(Debug, Clone)] -pub struct OutboundProtocol { - /// The serialized protobuf request. - request: Vec, - /// The original request. Passed back through the API when the response comes back. - original_request: message::BlockRequest, - /// The max. response length in bytes. - max_response_size: usize, - /// The protocol to use for upgrade negotiation. 
- protocol: Bytes, -} - -impl UpgradeInfo for OutboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } -} - -impl OutboundUpgrade for OutboundProtocol -where - B: Block, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = NodeEvent; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { - async move { - write_one(&mut s, &self.request).await?; - let vec = read_one(&mut s, self.max_response_size).await?; - - schema::v1::BlockResponse::decode(&vec[..]) - .map(|r| NodeEvent::Response(self.original_request, r)) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) - }.boxed() - } -} - -/// Build protobuf block request message. -pub(crate) fn build_protobuf_block_request( - attributes: BlockAttributes, - from_block: message::FromBlock, - to_block: Option, - direction: message::Direction, - max_blocks: Option, -) -> schema::v1::BlockRequest { - schema::v1::BlockRequest { - fields: attributes.to_be_u32(), - from_block: match from_block { - message::FromBlock::Hash(h) => - Some(schema::v1::block_request::FromBlock::Hash(h.encode())), - message::FromBlock::Number(n) => - Some(schema::v1::block_request::FromBlock::Number(n.encode())), - }, - to_block: to_block.map(|h| h.encode()).unwrap_or_default(), - direction: match direction { - message::Direction::Ascending => schema::v1::Direction::Ascending as i32, - message::Direction::Descending => schema::v1::Direction::Descending as i32, - }, - max_blocks: max_blocks.unwrap_or(0), - } -} diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 20fbe0284397d..081d4b0d3ac3d 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -32,15 +32,3 @@ impl Client for T T: HeaderBackend + ProofProvider + BlockIdTo + BlockBackend + HeaderMetadata + Send + Sync {} - -/// Finality proof provider. -pub trait FinalityProofProvider: Send + Sync { - /// Prove finality of the block. - fn prove_finality(&self, for_block: Block::Hash, request: &[u8]) -> Result>, Error>; -} - -impl FinalityProofProvider for () { - fn prove_finality(&self, _for_block: Block::Hash, _request: &[u8]) -> Result>, Error> { - Ok(None) - } -} diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 86450dc6e79bf..5a2327dda1308 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,9 +21,13 @@ //! The [`Params`] struct is the struct that must be passed in order to initialize the networking. //! See the documentation of [`Params`]. 
-pub use crate::chain::{Client, FinalityProofProvider}; +pub use crate::chain::Client; pub use crate::on_demand_layer::{AlwaysBadChecker, OnDemand}; -pub use crate::request_responses::{IncomingRequest, ProtocolConfig as RequestResponseConfig}; +pub use crate::request_responses::{ + IncomingRequest, + OutgoingResponse, + ProtocolConfig as RequestResponseConfig, +}; pub use libp2p::{identity, core::PublicKey, wasm_ext::ExtTransport, build_multiaddr}; // Note: this re-export shouldn't be part of the public API of the crate and will be removed in @@ -41,7 +45,7 @@ use libp2p::{ }; use prometheus_endpoint::Registry; use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use sp_runtime::traits::Block as BlockT; use std::{borrow::Cow, convert::TryFrom, future::Future, pin::Pin, str::FromStr}; use std::{ collections::HashMap, @@ -70,17 +74,6 @@ pub struct Params { /// Client that contains the blockchain. pub chain: Arc>, - /// Finality proof provider. - /// - /// This object, if `Some`, is used when a node on the network requests a proof of finality - /// from us. - pub finality_proof_provider: Option>>, - - /// How to build requests for proofs of finality. - /// - /// This object, if `Some`, is used when we need a proof of finality from another node. - pub finality_proof_request_builder: Option>, - /// The `OnDemand` object acts as a "receiver" for block data requests from the client. /// If `Some`, the network worker will process these requests and answer them. /// Normally used only for light clients. @@ -106,6 +99,26 @@ pub struct Params { /// Registry for recording prometheus metrics to. pub metrics_registry: Option, + + /// Request response configuration for the block request protocol. + /// + /// [`RequestResponseConfig`] [`name`] is used to tag outgoing block requests with the correct + /// protocol name. In addition all of [`RequestResponseConfig`] is used to handle incoming block + /// requests, if enabled. + /// + /// Can be constructed either via [`block_request_handler::generate_protocol_config`] allowing + /// outgoing but not incoming requests, or constructed via + /// [`block_request_handler::BlockRequestHandler::new`] allowing both outgoing and incoming + /// requests. + pub block_request_protocol_config: RequestResponseConfig, + + /// Request response configuration for the light client request protocol. + /// + /// Can be constructed either via [`light_client_requests::generate_protocol_config`] allowing + /// outgoing but not incoming requests, or constructed via + /// [`light_client_requests::handler::LightClientRequestHandler::new`] allowing both outgoing + /// and incoming requests. + pub light_client_request_protocol_config: RequestResponseConfig, } /// Role of the local node. @@ -153,25 +166,6 @@ impl fmt::Display for Role { } } -/// Finality proof request builder. -pub trait FinalityProofRequestBuilder: Send { - /// Build data blob, associated with the request. - fn build_request_data(&mut self, hash: &B::Hash) -> Vec; -} - -/// Implementation of `FinalityProofRequestBuilder` that builds a dummy empty request. -#[derive(Debug, Default)] -pub struct DummyFinalityProofRequestBuilder; - -impl FinalityProofRequestBuilder for DummyFinalityProofRequestBuilder { - fn build_request_data(&mut self, _: &B::Hash) -> Vec { - Vec::new() - } -} - -/// Shared finality proof request builder struct used by the queue. 
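A hedged sketch of the two construction paths mentioned in the `block_request_protocol_config` documentation above; `protocol_id`, `client` and `spawn_handle` stand in for the surrounding service code and are not part of this change:

// Full node: serve incoming block requests in addition to sending our own.
let (handler, block_request_protocol_config) =
    BlockRequestHandler::new(&protocol_id, client.clone());
spawn_handle.spawn("block-request-handler", handler.run());

// Outgoing-only alternative: no inbound queue is registered, so incoming
// requests on this protocol are refused.
let block_request_protocol_config =
    block_request_handler::generate_protocol_config(&protocol_id);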
-pub type BoxFinalityProofRequestBuilder = Box + Send + Sync>; - /// Result of the transaction import. #[derive(Clone, Copy, Debug)] pub enum TransactionImport { @@ -400,19 +394,12 @@ pub struct NetworkConfiguration { pub boot_nodes: Vec, /// The node key configuration, which determines the node's network identity keypair. pub node_key: NodeKeyConfig, - /// List of notifications protocols that the node supports. Must also include a - /// `ConsensusEngineId` for backwards-compatibility. - pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, str>)>, /// List of request-response protocols that the node supports. pub request_response_protocols: Vec, - /// Maximum allowed number of incoming connections. - pub in_peers: u32, - /// Number of outgoing connections we're trying to maintain. - pub out_peers: u32, - /// List of reserved node addresses. - pub reserved_nodes: Vec, - /// The non-reserved peer mode. - pub non_reserved_mode: NonReservedPeerMode, + /// Configuration for the default set of nodes used for block syncing and transactions. + pub default_peers_set: SetConfig, + /// Configuration for extra sets of nodes. + pub extra_sets: Vec, /// Client identifier. Sent over the wire for debugging purposes. pub client_version: String, /// Name of the node. Sent over the wire for debugging purposes. @@ -421,11 +408,41 @@ pub struct NetworkConfiguration { pub transport: TransportConfig, /// Maximum number of peers to ask the same blocks in parallel. pub max_parallel_downloads: u32, + + /// True if Kademlia random discovery should be enabled. + /// + /// If true, the node will automatically randomly walk the DHT in order to find new peers. + pub enable_dht_random_walk: bool, + /// Should we insert non-global addresses into the DHT? pub allow_non_globals_in_dht: bool, - /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in the - /// presence of potentially adversarial nodes. + + /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in + /// the presence of potentially adversarial nodes. pub kademlia_disjoint_query_paths: bool, + /// Enable serving block data over IPFS bitswap. + pub ipfs_server: bool, + + /// Size of Yamux receive window of all substreams. `None` for the default (256kiB). + /// Any value less than 256kiB is invalid. + /// + /// # Context + /// + /// By design, notifications substreams on top of Yamux connections only allow up to `N` bytes + /// to be transferred at a time, where `N` is the Yamux receive window size configurable here. + /// This means, in practice, that every `N` bytes must be acknowledged by the receiver before + /// the sender can send more data. The maximum bandwidth of each notifications substream is + /// therefore `N / round_trip_time`. + /// + /// It is recommended to leave this to `None`, and use a request-response protocol instead if + /// a large amount of data must be transferred. The reason why the value is configurable is + /// that some Substrate users mis-use notification protocols to send large amounts of data. + /// As such, this option isn't designed to stay and will likely get removed in the future. + /// + /// Note that configuring a value here isn't a modification of the Yamux protocol, but rather + /// a modification of the way the implementation works. Different nodes with different + /// configured values remain compatible with each other. 
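A quick worked example of the `N / round_trip_time` ceiling described above (the round-trip time is an assumed figure, not something from this change):

// With the default 256 KiB receive window and an assumed 100 ms round trip,
// one notifications substream cannot exceed ~2.5 MiB/s, because each window's
// worth of data has to be acknowledged before more can be sent.
const WINDOW_BYTES: f64 = 256.0 * 1024.0;
const ROUND_TRIP_SECS: f64 = 0.1;
const MAX_THROUGHPUT_BYTES_PER_SEC: f64 = WINDOW_BYTES / ROUND_TRIP_SECS; // 2_621_440 B/s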
+ pub yamux_window_size: Option, } impl NetworkConfiguration { @@ -442,12 +459,9 @@ impl NetworkConfiguration { public_addresses: Vec::new(), boot_nodes: Vec::new(), node_key, - notifications_protocols: Vec::new(), request_response_protocols: Vec::new(), - in_peers: 25, - out_peers: 75, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Accept, + default_peers_set: Default::default(), + extra_sets: Vec::new(), client_version: client_version.into(), node_name: node_name.into(), transport: TransportConfig::Normal { @@ -456,8 +470,11 @@ impl NetworkConfiguration { wasm_external_transport: None, }, max_parallel_downloads: 5, + enable_dht_random_walk: true, allow_non_globals_in_dht: false, kademlia_disjoint_query_paths: false, + yamux_window_size: None, + ipfs_server: false, } } @@ -500,6 +517,46 @@ impl NetworkConfiguration { } } +/// Configuration for a set of nodes. +#[derive(Clone, Debug)] +pub struct SetConfig { + /// Maximum allowed number of incoming substreams related to this set. + pub in_peers: u32, + /// Number of outgoing substreams related to this set that we're trying to maintain. + pub out_peers: u32, + /// List of reserved node addresses. + pub reserved_nodes: Vec, + /// Whether nodes that aren't in [`SetConfig::reserved_nodes`] are accepted or automatically + /// refused. + pub non_reserved_mode: NonReservedPeerMode, +} + +impl Default for SetConfig { + fn default() -> Self { + SetConfig { + in_peers: 25, + out_peers: 75, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + } + } +} + +/// Extension to [`SetConfig`] for sets that aren't the default set. +#[derive(Clone, Debug)] +pub struct NonDefaultSetConfig { + /// Name of the notifications protocols of this set. A substream on this set will be + /// considered established once this protocol is open. + /// + /// > **Note**: This field isn't present for the default set, as this is handled internally + /// > by the networking code. + pub notifications_protocol: Cow<'static, str>, + /// Maximum allowed size of single notifications. + pub max_notification_size: u64, + /// Base configuration. + pub set_config: SetConfig, +} + /// Configuration for the transport layer. #[derive(Clone, Debug)] pub enum TransportConfig { diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index f9bda6aabf5f8..87b533ef77dc1 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. 
If not, see . //! Discovery mechanisms of Substrate. //! @@ -51,16 +53,14 @@ use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; use libp2p::core::{connection::{ConnectionId, ListenerId}, ConnectedPoint, Multiaddr, PeerId, PublicKey}; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; -use libp2p::swarm::protocols_handler::multi::MultiHandler; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler, IntoProtocolsHandler}; +use libp2p::swarm::protocols_handler::multi::IntoMultiHandler; use libp2p::kad::{Kademlia, KademliaBucketInserts, KademliaConfig, KademliaEvent, QueryResult, Quorum, Record}; use libp2p::kad::GetClosestPeersError; -use libp2p::kad::handler::KademliaHandler; +use libp2p::kad::handler::KademliaHandlerProto; use libp2p::kad::QueryId; use libp2p::kad::record::{self, store::{MemoryStore, RecordStore}}; #[cfg(not(target_os = "unknown"))] -use libp2p::swarm::toggle::Toggle; -#[cfg(not(target_os = "unknown"))] use libp2p::mdns::{Mdns, MdnsEvent}; use libp2p::multiaddr::Protocol; use log::{debug, info, trace, warn}; @@ -80,6 +80,7 @@ const MAX_KNOWN_EXTERNAL_ADDRESSES: usize = 32; pub struct DiscoveryConfig { local_peer_id: PeerId, user_defined: Vec<(PeerId, Multiaddr)>, + dht_random_walk: bool, allow_private_ipv4: bool, allow_non_globals_in_dht: bool, discovery_only_if_under_num: u64, @@ -94,6 +95,7 @@ impl DiscoveryConfig { DiscoveryConfig { local_peer_id: local_public_key.into_peer_id(), user_defined: Vec::new(), + dht_random_walk: true, allow_private_ipv4: true, allow_non_globals_in_dht: false, discovery_only_if_under_num: std::u64::MAX, @@ -118,6 +120,13 @@ impl DiscoveryConfig { self } + /// Whether the discovery behaviour should periodically perform a random + /// walk on the DHT to discover peers. + pub fn with_dht_random_walk(&mut self, value: bool) -> &mut Self { + self.dht_random_walk = value; + self + } + /// Should private IPv4 addresses be reported? pub fn allow_private_ipv4(&mut self, value: bool) -> &mut Self { self.allow_private_ipv4 = value; @@ -163,6 +172,7 @@ impl DiscoveryConfig { let DiscoveryConfig { local_peer_id, user_defined, + dht_random_walk, allow_private_ipv4, allow_non_globals_in_dht, discovery_only_if_under_num, @@ -197,7 +207,11 @@ impl DiscoveryConfig { DiscoveryBehaviour { user_defined, kademlias, - next_kad_random_query: Delay::new(Duration::new(0, 0)), + next_kad_random_query: if dht_random_walk { + Some(Delay::new(Duration::new(0, 0))) + } else { + None + }, duration_to_next_kad: Duration::from_secs(1), pending_events: VecDeque::new(), local_peer_id, @@ -206,15 +220,9 @@ impl DiscoveryConfig { discovery_only_if_under_num, #[cfg(not(target_os = "unknown"))] mdns: if enable_mdns { - match Mdns::new() { - Ok(mdns) => Some(mdns).into(), - Err(err) => { - warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); - None.into() - } - } + MdnsWrapper::Instantiating(Mdns::new().boxed()) } else { - None.into() + MdnsWrapper::Disabled }, allow_non_globals_in_dht, known_external_addresses: LruHashSet::new( @@ -234,9 +242,10 @@ pub struct DiscoveryBehaviour { kademlias: HashMap>, /// Discovers nodes on the local network. #[cfg(not(target_os = "unknown"))] - mdns: Toggle, - /// Stream that fires when we need to perform the next random Kademlia query. - next_kad_random_query: Delay, + mdns: MdnsWrapper, + /// Stream that fires when we need to perform the next random Kademlia query. `None` if + /// random walking is disabled. 
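A hedged sketch of threading the new knob through discovery configuration, using only builder methods visible in this file; `local_public_key` is a placeholder for the node's identity key:

// A node that relies on bootnodes/mDNS only and opts out of random Kademlia walks.
let mut config = DiscoveryConfig::new(local_public_key);
config
    .with_dht_random_walk(false)
    .allow_private_ipv4(true);
let discovery = config.finish();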
+ next_kad_random_query: Option, /// After `next_kad_random_query` triggers, the next one triggers after this duration. duration_to_next_kad: Duration, /// Events to return in priority when polled. @@ -440,18 +449,20 @@ pub enum DiscoveryOut { ValuePutFailed(record::Key, Duration), /// Started a random Kademlia query for each DHT identified by the given `ProtocolId`s. + /// + /// Only happens if [`DiscoveryConfig::with_dht_random_walk`] has been configured to `true`. RandomKademliaStarted(Vec), } impl NetworkBehaviour for DiscoveryBehaviour { - type ProtocolsHandler = MultiHandler>; + type ProtocolsHandler = IntoMultiHandler>; type OutEvent = DiscoveryOut; fn new_handler(&mut self) -> Self::ProtocolsHandler { let iter = self.kademlias.iter_mut() .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))); - MultiHandler::try_from_iter(iter) + IntoMultiHandler::try_from_iter(iter) .expect("There can be at most one handler per `ProtocolId` and \ protocol names contain the `ProtocolId` so no two protocol \ names in `self.kademlias` can be equal which is the only error \ @@ -534,7 +545,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { &mut self, peer_id: PeerId, connection: ConnectionId, - (pid, event): ::OutEvent, + (pid, event): <::Handler as ProtocolsHandler>::OutEvent, ) { if let Some(kad) = self.kademlias.get_mut(&pid) { return kad.inject_event(peer_id, connection, event) @@ -598,7 +609,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { params: &mut impl PollParameters, ) -> Poll< NetworkBehaviourAction< - ::InEvent, + <::Handler as ProtocolsHandler>::InEvent, Self::OutEvent, >, > { @@ -608,34 +619,36 @@ impl NetworkBehaviour for DiscoveryBehaviour { } // Poll the stream that fires when we need to start a random Kademlia query. - while let Poll::Ready(_) = self.next_kad_random_query.poll_unpin(cx) { - let actually_started = if self.num_connections < self.discovery_only_if_under_num { - let random_peer_id = PeerId::random(); - debug!(target: "sub-libp2p", - "Libp2p <= Starting random Kademlia request for {:?}", - random_peer_id); - for k in self.kademlias.values_mut() { - k.get_closest_peers(random_peer_id.clone()); + if let Some(next_kad_random_query) = self.next_kad_random_query.as_mut() { + while let Poll::Ready(_) = next_kad_random_query.poll_unpin(cx) { + let actually_started = if self.num_connections < self.discovery_only_if_under_num { + let random_peer_id = PeerId::random(); + debug!(target: "sub-libp2p", + "Libp2p <= Starting random Kademlia request for {:?}", + random_peer_id); + for k in self.kademlias.values_mut() { + k.get_closest_peers(random_peer_id.clone()); + } + true + } else { + debug!( + target: "sub-libp2p", + "Kademlia paused due to high number of connections ({})", + self.num_connections + ); + false + }; + + // Schedule the next random query with exponentially increasing delay, + // capped at 60 seconds. + *next_kad_random_query = Delay::new(self.duration_to_next_kad); + self.duration_to_next_kad = cmp::min(self.duration_to_next_kad * 2, + Duration::from_secs(60)); + + if actually_started { + let ev = DiscoveryOut::RandomKademliaStarted(self.kademlias.keys().cloned().collect()); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); } - true - } else { - debug!( - target: "sub-libp2p", - "Kademlia paused due to high number of connections ({})", - self.num_connections - ); - false - }; - - // Schedule the next random query with exponentially increasing delay, - // capped at 60 seconds. 
- self.next_kad_random_query = Delay::new(self.duration_to_next_kad); - self.duration_to_next_kad = cmp::min(self.duration_to_next_kad * 2, - Duration::from_secs(60)); - - if actually_started { - let ev = DiscoveryOut::RandomKademliaStarted(self.kademlias.keys().cloned().collect()); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); } } @@ -693,7 +706,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) } Err(e) => { - warn!(target: "sub-libp2p", + debug!(target: "sub-libp2p", "Libp2p => Failed to get record: {:?}", e); DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) } @@ -704,7 +717,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { let ev = match res { Ok(ok) => DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_else(Default::default)), Err(e) => { - warn!(target: "sub-libp2p", + debug!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e); DiscoveryOut::ValuePutFailed(e.into_key(), stats.duration().unwrap_or_else(Default::default)) } @@ -716,7 +729,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { Ok(ok) => debug!(target: "sub-libp2p", "Libp2p => Record republished: {:?}", ok.key), - Err(e) => warn!(target: "sub-libp2p", + Err(e) => debug!(target: "sub-libp2p", "Libp2p => Republishing of record {:?} failed with: {:?}", e.key(), e) } @@ -736,8 +749,8 @@ impl NetworkBehaviour for DiscoveryBehaviour { handler, event: (pid.clone(), event) }), - NetworkBehaviourAction::ReportObservedAddr { address } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + NetworkBehaviourAction::ReportObservedAddr { address, score } => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), } } } @@ -767,8 +780,8 @@ impl NetworkBehaviour for DiscoveryBehaviour { return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), NetworkBehaviourAction::NotifyHandler { event, .. } => match event {}, // `event` is an enum with no variant - NetworkBehaviourAction::ReportObservedAddr { address } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + NetworkBehaviourAction::ReportObservedAddr { address, score } => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), } } @@ -785,6 +798,48 @@ fn protocol_name_from_protocol_id(id: &ProtocolId) -> Vec { v } +/// [`Mdns::new`] returns a future. Instead of forcing [`DiscoveryConfig::finish`] and all its +/// callers to be async, lazily instantiate [`Mdns`]. 
+#[cfg(not(target_os = "unknown"))] +enum MdnsWrapper { + Instantiating(futures::future::BoxFuture<'static, std::io::Result>), + Ready(Mdns), + Disabled, +} + +#[cfg(not(target_os = "unknown"))] +impl MdnsWrapper { + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + match self { + MdnsWrapper::Instantiating(_) => Vec::new(), + MdnsWrapper::Ready(mdns) => mdns.addresses_of_peer(peer_id), + MdnsWrapper::Disabled => Vec::new(), + } + } + + fn poll( + &mut self, + cx: &mut Context<'_>, + params: &mut impl PollParameters, + ) -> Poll> { + loop { + match self { + MdnsWrapper::Instantiating(fut) => { + *self = match futures::ready!(fut.as_mut().poll(cx)) { + Ok(mdns) => MdnsWrapper::Ready(mdns), + Err(err) => { + warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); + MdnsWrapper::Disabled + }, + } + } + MdnsWrapper::Ready(mdns) => return mdns.poll(cx, params), + MdnsWrapper::Disabled => return Poll::Pending, + } + } + } +} + #[cfg(test)] mod tests { use crate::config::ProtocolId; @@ -816,7 +871,7 @@ mod tests { let transport = MemoryTransport .upgrade(upgrade::Version::V1) .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(yamux::Config::default()) + .multiplex(yamux::YamuxConfig::default()) .boxed(); let behaviour = { diff --git a/client/network/src/error.rs b/client/network/src/error.rs index 7d7603ce92aab..2a226b58b46a5 100644 --- a/client/network/src/error.rs +++ b/client/network/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/network/src/finality_requests.rs b/client/network/src/finality_requests.rs deleted file mode 100644 index 55f56b9a0cc25..0000000000000 --- a/client/network/src/finality_requests.rs +++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. -// -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! `NetworkBehaviour` implementation which handles incoming finality proof requests. -//! -//! Every request is coming in on a separate connection substream which gets -//! closed after we have sent the response back. Incoming requests are encoded -//! as protocol buffers (cf. `finality.v1.proto`). 
- -#![allow(unused)] - -use bytes::Bytes; -use codec::{Encode, Decode}; -use crate::{ - chain::FinalityProofProvider, - config::ProtocolId, - protocol::message, - schema, -}; -use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use libp2p::{ - core::{ - ConnectedPoint, - Multiaddr, - PeerId, - connection::ConnectionId, - upgrade::{InboundUpgrade, OutboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, - upgrade::{DeniedUpgrade, read_one, write_one} - }, - swarm::{ - NegotiatedSubstream, - NetworkBehaviour, - NetworkBehaviourAction, - NotifyHandler, - OneShotHandler, - OneShotHandlerConfig, - PollParameters, - SubstreamProtocol - } -}; -use prost::Message; -use sp_runtime::{generic::BlockId, traits::{Block, Header, One, Zero}}; -use std::{ - cmp::min, - collections::VecDeque, - io, - iter, - marker::PhantomData, - sync::Arc, - time::Duration, - task::{Context, Poll} -}; -use void::{Void, unreachable}; - -// Type alias for convenience. -pub type Error = Box; - -/// Event generated by the finality proof requests behaviour. -#[derive(Debug)] -pub enum Event { - /// A response to a finality proof request has arrived. - Response { - peer: PeerId, - /// Block hash originally passed to `send_request`. - block_hash: B::Hash, - /// Finality proof returned by the remote. - proof: Vec, - }, -} - -/// Configuration options for `FinalityProofRequests`. -#[derive(Debug, Clone)] -pub struct Config { - max_request_len: usize, - max_response_len: usize, - inactivity_timeout: Duration, - protocol: Bytes, -} - -impl Config { - /// Create a fresh configuration with the following options: - /// - /// - max. request size = 1 MiB - /// - max. response size = 1 MiB - /// - inactivity timeout = 15s - pub fn new(id: &ProtocolId) -> Self { - let mut c = Config { - max_request_len: 1024 * 1024, - max_response_len: 1024 * 1024, - inactivity_timeout: Duration::from_secs(15), - protocol: Bytes::new(), - }; - c.set_protocol(id); - c - } - - /// Limit the max. length of incoming finality proof request bytes. - pub fn set_max_request_len(&mut self, v: usize) -> &mut Self { - self.max_request_len = v; - self - } - - /// Limit the max. length of incoming finality proof response bytes. - pub fn set_max_response_len(&mut self, v: usize) -> &mut Self { - self.max_response_len = v; - self - } - - /// Limit the max. duration the substream may remain inactive before closing it. - pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { - self.inactivity_timeout = v; - self - } - - /// Set protocol to use for upgrade negotiation. - pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { - let mut v = Vec::new(); - v.extend_from_slice(b"/"); - v.extend_from_slice(id.as_ref().as_bytes()); - v.extend_from_slice(b"/finality-proof/1"); - self.protocol = v.into(); - self - } -} - -/// The finality proof request handling behaviour. -pub struct FinalityProofRequests { - /// This behaviour's configuration. - config: Config, - /// How to construct finality proofs. - finality_proof_provider: Option>>, - /// Futures sending back the finality proof request responses. - outgoing: FuturesUnordered>, - /// Events to return as soon as possible from `poll`. - pending_events: VecDeque, Event>>, -} - -impl FinalityProofRequests -where - B: Block, -{ - /// Initializes the behaviour. - /// - /// If the proof provider is `None`, then the behaviour will not support the finality proof - /// requests protocol. 
- pub fn new(cfg: Config, finality_proof_provider: Option>>) -> Self { - FinalityProofRequests { - config: cfg, - finality_proof_provider, - outgoing: FuturesUnordered::new(), - pending_events: VecDeque::new(), - } - } - - /// Issue a new finality proof request. - /// - /// If the response doesn't arrive in time, or if the remote answers improperly, the target - /// will be disconnected. - pub fn send_request(&mut self, target: &PeerId, block_hash: B::Hash, request: Vec) { - let protobuf_rq = schema::v1::finality::FinalityProofRequest { - block_hash: block_hash.encode(), - request, - }; - - let mut buf = Vec::with_capacity(protobuf_rq.encoded_len()); - if let Err(err) = protobuf_rq.encode(&mut buf) { - log::warn!("failed to encode finality proof request {:?}: {:?}", protobuf_rq, err); - return; - } - - log::trace!("enqueueing finality proof request to {:?}: {:?}", target, protobuf_rq); - self.pending_events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: target.clone(), - handler: NotifyHandler::Any, - event: OutboundProtocol { - request: buf, - block_hash, - max_response_size: self.config.max_response_len, - protocol: self.config.protocol.clone(), - }, - }); - } - - /// Callback, invoked when a new finality request has been received from remote. - fn on_finality_request(&mut self, peer: &PeerId, request: &schema::v1::finality::FinalityProofRequest) - -> Result - { - let block_hash = Decode::decode(&mut request.block_hash.as_ref())?; - - log::trace!(target: "sync", "Finality proof request from {} for {}", peer, block_hash); - - // Note that an empty Vec is sent if no proof is available. - let finality_proof = if let Some(provider) = &self.finality_proof_provider { - provider - .prove_finality(block_hash, &request.request)? - .unwrap_or_default() - } else { - log::error!("Answering a finality proof request while finality provider is empty"); - return Err(From::from("Empty finality proof provider".to_string())) - }; - - Ok(schema::v1::finality::FinalityProofResponse { proof: finality_proof }) - } -} - -impl NetworkBehaviour for FinalityProofRequests -where - B: Block -{ - type ProtocolsHandler = OneShotHandler, OutboundProtocol, NodeEvent>; - type OutEvent = Event; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - let p = InboundProtocol { - max_request_len: self.config.max_request_len, - protocol: if self.finality_proof_provider.is_some() { - Some(self.config.protocol.clone()) - } else { - None - }, - marker: PhantomData, - }; - let mut cfg = OneShotHandlerConfig::default(); - cfg.keep_alive_timeout = self.config.inactivity_timeout; - OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) - } - - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { - Vec::new() - } - - fn inject_connected(&mut self, _peer: &PeerId) { - } - - fn inject_disconnected(&mut self, _peer: &PeerId) { - } - - fn inject_event( - &mut self, - peer: PeerId, - connection: ConnectionId, - event: NodeEvent - ) { - match event { - NodeEvent::Request(request, mut stream) => { - match self.on_finality_request(&peer, &request) { - Ok(res) => { - log::trace!("enqueueing finality response for peer {}", peer); - let mut data = Vec::with_capacity(res.encoded_len()); - if let Err(e) = res.encode(&mut data) { - log::debug!("error encoding finality response for peer {}: {}", peer, e) - } else { - let future = async move { - if let Err(e) = write_one(&mut stream, data).await { - log::debug!("error writing finality response: {}", e) - } - }; - self.outgoing.push(future.boxed()) - } - } - Err(e) => 
log::debug!("error handling finality request from peer {}: {}", peer, e) - } - } - NodeEvent::Response(response, block_hash) => { - let ev = Event::Response { - peer, - block_hash, - proof: response.proof, - }; - self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(ev)); - } - } - } - - fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) - -> Poll, Event>> - { - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(ev); - } - - while let Poll::Ready(Some(_)) = self.outgoing.poll_next_unpin(cx) {} - Poll::Pending - } -} - -/// Output type of inbound and outbound substream upgrades. -#[derive(Debug)] -pub enum NodeEvent { - /// Incoming request from remote and substream to use for the response. - Request(schema::v1::finality::FinalityProofRequest, T), - /// Incoming response from remote. - Response(schema::v1::finality::FinalityProofResponse, B::Hash), -} - -/// Substream upgrade protocol. -/// -/// We attempt to parse an incoming protobuf encoded request (cf. `Request`) -/// which will be handled by the `FinalityProofRequests` behaviour, i.e. the request -/// will become visible via `inject_node_event` which then dispatches to the -/// relevant callback to process the message and prepare a response. -#[derive(Debug, Clone)] -pub struct InboundProtocol { - /// The max. request length in bytes. - max_request_len: usize, - /// The protocol to use during upgrade negotiation. If `None`, then the incoming protocol - /// is simply disabled. - protocol: Option, - /// Marker to pin the block type. - marker: PhantomData, -} - -impl UpgradeInfo for InboundProtocol { - type Info = Bytes; - // This iterator will return either 0 elements if `self.protocol` is `None`, or 1 element if - // it is `Some`. - type InfoIter = std::option::IntoIter; - - fn protocol_info(&self) -> Self::InfoIter { - self.protocol.clone().into_iter() - } -} - -impl InboundUpgrade for InboundProtocol -where - B: Block, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = NodeEvent; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { - async move { - let len = self.max_request_len; - let vec = read_one(&mut s, len).await?; - match schema::v1::finality::FinalityProofRequest::decode(&vec[..]) { - Ok(r) => Ok(NodeEvent::Request(r, s)), - Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) - } - }.boxed() - } -} - -/// Substream upgrade protocol. -/// -/// Sends a request to remote and awaits the response. -#[derive(Debug, Clone)] -pub struct OutboundProtocol { - /// The serialized protobuf request. - request: Vec, - /// Block hash that has been requested. - block_hash: B::Hash, - /// The max. response length in bytes. - max_response_size: usize, - /// The protocol to use for upgrade negotiation. 
- protocol: Bytes, -} - -impl UpgradeInfo for OutboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } -} - -impl OutboundUpgrade for OutboundProtocol -where - B: Block, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = NodeEvent; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { - async move { - write_one(&mut s, &self.request).await?; - let vec = read_one(&mut s, self.max_response_size).await?; - - schema::v1::finality::FinalityProofResponse::decode(&vec[..]) - .map(|r| NodeEvent::Response(r, self.block_hash)) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) - }.boxed() - } -} diff --git a/client/network/src/gossip.rs b/client/network/src/gossip.rs index 9d20229288a42..4e6845e341261 100644 --- a/client/network/src/gossip.rs +++ b/client/network/src/gossip.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -41,7 +41,7 @@ //! In normal situations, messages sent through a [`QueuedSender`] will arrive in the same //! order as they have been sent. //! It is possible, in the situation of disconnects and reconnects, that messages arrive in a -//! different order. See also https://github.com/paritytech/substrate/issues/6756. +//! different order. See also . //! However, if multiple instances of [`QueuedSender`] exist for the same peer and protocol, or //! if some other code uses the [`NetworkService`] to send notifications to this combination or //! peer and protocol, then the notifications will be interleaved in an unpredictable way. @@ -53,8 +53,9 @@ use async_std::sync::{Mutex, MutexGuard}; use futures::prelude::*; use futures::channel::mpsc::{channel, Receiver, Sender}; use libp2p::PeerId; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use sp_runtime::traits::Block as BlockT; use std::{ + borrow::Cow, collections::VecDeque, fmt, sync::Arc, @@ -82,7 +83,7 @@ impl QueuedSender { pub fn new( service: Arc>, peer_id: PeerId, - protocol: ConsensusEngineId, + protocol: Cow<'static, str>, queue_size_limit: usize, messages_encode: F ) -> (Self, impl Future + Send + 'static) @@ -193,7 +194,7 @@ async fn create_background_future Vec> mut wait_for_sender: Receiver<()>, service: Arc>, peer_id: PeerId, - protocol: ConsensusEngineId, + protocol: Cow<'static, str>, shared_message_queue: SharedMessageQueue, messages_encode: F, ) { @@ -212,7 +213,7 @@ async fn create_background_future Vec> // Starting from below, we try to send the message. If an error happens when sending, // the only sane option we have is to silently discard the message. - let sender = match service.notification_sender(peer_id.clone(), protocol) { + let sender = match service.notification_sender(peer_id.clone(), protocol.clone()) { Ok(s) => s, Err(_) => continue, }; diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs index 0f01ed81bffcb..c0b8c5e730a11 100644 --- a/client/network/src/gossip/tests.rs +++ b/client/network/src/gossip/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,11 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{config, gossip::QueuedSender, Event, NetworkService, NetworkWorker}; +use crate::block_request_handler::BlockRequestHandler; +use crate::light_client_requests::handler::LightClientRequestHandler; +use crate::gossip::QueuedSender; +use crate::{config, Event, NetworkService, NetworkWorker}; use futures::prelude::*; use sp_runtime::traits::{Block as BlockT, Header as _}; -use std::{sync::Arc, time::Duration}; +use std::{borrow::Cow, sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; type TestNetworkService = NetworkService< @@ -33,7 +36,7 @@ type TestNetworkService = NetworkService< /// /// > **Note**: We return the events stream in order to not possibly lose events between the /// > construction of the service and the moment the events stream is grabbed. -fn build_test_full_node(config: config::NetworkConfiguration) +fn build_test_full_node(network_config: config::NetworkConfiguration) -> (Arc, impl Stream) { let client = Arc::new( @@ -86,26 +89,45 @@ fn build_test_full_node(config: config::NetworkConfiguration) PassThroughVerifier(false), Box::new(client.clone()), None, - None, &sp_core::testing::TaskExecutor::new(), None, )); + let protocol_id = config::ProtocolId::from("/test-protocol-name"); + + let block_request_protocol_config = { + let (handler, protocol_config) = BlockRequestHandler::new( + &protocol_id, + client.clone(), + ); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let light_client_request_protocol_config = { + let (handler, protocol_config) = LightClientRequestHandler::new( + &protocol_id, + client.clone(), + ); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + let worker = NetworkWorker::new(config::Params { role: config::Role::Full, executor: None, - network_config: config, + network_config, chain: client.clone(), - finality_proof_provider: None, - finality_proof_request_builder: None, on_demand: None, transaction_pool: Arc::new(crate::config::EmptyTransactionPool), - protocol_id: config::ProtocolId::from("/test-protocol-name"), + protocol_id, import_queue, block_announce_validator: Box::new( sp_consensus::block_validation::DefaultBlockAnnounceValidator, ), metrics_registry: None, + block_request_protocol_config, + light_client_request_protocol_config, }) .unwrap(); @@ -120,29 +142,43 @@ fn build_test_full_node(config: config::NetworkConfiguration) (service, event_stream) } -const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; +const PROTOCOL_NAME: Cow<'static, str> = Cow::Borrowed("/foo"); /// Builds two nodes and their associated events stream. -/// The nodes are connected together and have the `ENGINE_ID` protocol registered. +/// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. 
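// The `QueuedSender` changes in `gossip.rs` above keep its original shape: callers push
// messages into a bounded shared queue, and a background future drains that queue and
// forwards each message to the peer, which is why messages normally arrive in the order
// they were queued. A minimal, self-contained sketch of that queue-plus-background-task
// shape; `QueuedSenderSketch`, `queued_sender` and the `send_to_peer` closure are
// illustrative stand-ins for the real types backed by `NetworkService`.

use futures::channel::mpsc;
use futures::StreamExt;
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};

struct QueuedSenderSketch<M> {
    queue: Arc<Mutex<VecDeque<M>>>,
    queue_size_limit: usize,
    wake_sender: mpsc::Sender<()>,
}

impl<M> QueuedSenderSketch<M> {
    /// Queue a message if there is room; silently discard it otherwise.
    fn queue_or_discard(&mut self, message: M) {
        let mut queue = self.queue.lock().unwrap();
        if queue.len() < self.queue_size_limit {
            queue.push_back(message);
            // Wake the background task; an error only means it is already awake or gone.
            let _ = self.wake_sender.try_send(());
        }
    }
}

/// Build the sender together with the background future that drains the queue.
fn queued_sender<M, F>(
    queue_size_limit: usize,
    mut send_to_peer: F,
) -> (QueuedSenderSketch<M>, impl std::future::Future<Output = ()>)
where
    F: FnMut(M),
{
    let queue = Arc::new(Mutex::new(VecDeque::new()));
    let (wake_sender, mut wake_receiver) = mpsc::channel(1);
    let background_queue = queue.clone();
    let background = async move {
        while wake_receiver.next().await.is_some() {
            // Drain everything queued so far, preserving insertion order.
            loop {
                let next = background_queue.lock().unwrap().pop_front();
                match next {
                    Some(message) => send_to_peer(message),
                    None => break,
                }
            }
        }
    };
    (QueuedSenderSketch { queue, queue_size_limit, wake_sender }, background)
}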
fn build_nodes_one_proto() -> (Arc, impl Stream, Arc, impl Stream) { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + extra_sets: vec![ + config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + max_notification_size: 1024 * 1024, + set_config: Default::default() + } + ], listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, .. config::NetworkConfiguration::new_local() }); let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], listen_addresses: vec![], - reserved_nodes: vec![config::MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), - }], + extra_sets: vec![ + config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + .. Default::default() + }, + } + ], transport: config::TransportConfig::MemoryOnly, .. config::NetworkConfiguration::new_local() }); @@ -165,7 +201,7 @@ fn basic_works() { Event::NotificationStreamClosed { .. } => panic!(), Event::NotificationsReceived { messages, .. } => { for message in messages { - assert_eq!(message.0, ENGINE_ID); + assert_eq!(message.0, PROTOCOL_NAME); assert_eq!(message.1, &b"message"[..]); received_notifications += 1; } @@ -181,7 +217,7 @@ fn basic_works() { async_std::task::block_on(async move { let (mut sender, bg_future) = - QueuedSender::new(node1, node2_id, ENGINE_ID, NUM_NOTIFS, |msg| msg); + QueuedSender::new(node1, node2_id, PROTOCOL_NAME, NUM_NOTIFS, |msg| msg); async_std::task::spawn(bg_future); // Wait for the `NotificationStreamOpened`. diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 3fd01c33dcf5f..5bd20927869e0 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -141,8 +141,9 @@ //! block announces are pushed to other nodes. The handshake is empty on both sides. The message //! format is a SCALE-encoded tuple containing a block header followed with an opaque list of //! bytes containing some data associated with this block announcement, e.g. a candidate message. -//! - Notifications protocols that are registered using the `register_notifications_protocol` -//! method. For example: `/paritytech/grandpa/1`. See below for more information. +//! - Notifications protocols that are registered using +//! `NetworkConfiguration::notifications_protocols`. For example: `/paritytech/grandpa/1`. See +//! below for more information. //! //! ## The legacy Substrate substream //! @@ -245,12 +246,9 @@ //! 
mod behaviour; -mod block_requests; mod chain; mod peer_info; mod discovery; -mod finality_requests; -mod light_client_handler; mod on_demand_layer; mod protocol; mod request_responses; @@ -259,6 +257,9 @@ mod service; mod transport; mod utils; +pub mod block_request_handler; +pub mod bitswap; +pub mod light_client_requests; pub mod config; pub mod error; pub mod gossip; @@ -269,7 +270,7 @@ pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use protocol::{event::{DhtEvent, Event, ObservedRole}, sync::SyncState, PeerInfo}; pub use service::{ NetworkService, NetworkWorker, RequestFailure, OutboundFailure, NotificationSender, - NotificationSenderReady, + NotificationSenderReady, IfDisconnected, }; pub use sc_peerset::ReputationChange; @@ -284,6 +285,9 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; /// two peers, the per-peer connection limit is not set to 1 but 2. const MAX_CONNECTIONS_PER_PEER: usize = 2; +/// The maximum number of concurrent established connections that were incoming. +const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; + /// Minimum Requirements for a Hash within Networking pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs deleted file mode 100644 index e7c5e9c1c9b95..0000000000000 --- a/client/network/src/light_client_handler.rs +++ /dev/null @@ -1,2056 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. -// -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! [`NetworkBehaviour`] implementation which handles light client requests. -//! -//! Every request is coming in on a separate connection substream which gets -//! closed after we have sent the response back. Requests and responses are -//! encoded as protocol buffers (cf. `api.v1.proto`). -//! -//! For every outgoing request we likewise open a separate substream. 
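// Like the finality-proof behaviour deleted above, this deleted handler names its substream
// protocols by prefixing a short suffix with the chain's `ProtocolId` (see
// `Config::set_protocol` further below, which produces `/<protocol-id>/light/2` for light
// requests and `/<protocol-id>/sync/2` for block requests). A small sketch of that naming
// convention; `protocol_path` and the "dot" id are illustrative, not actual Substrate
// identifiers.

/// Build a versioned substream protocol path such as "/dot/light/2" from a protocol id,
/// a short protocol name and a version number.
fn protocol_path(protocol_id: &str, name: &str, version: u32) -> String {
    format!("/{}/{}/{}", protocol_id, name, version)
}

fn main() {
    // "dot" is only an example id; chains configure their own `ProtocolId`.
    assert_eq!(protocol_path("dot", "light", 2), "/dot/light/2");
    assert_eq!(protocol_path("dot", "sync", 2), "/dot/sync/2");
}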
- -#![allow(unused)] - -use bytes::Bytes; -use codec::{self, Encode, Decode}; -use crate::{ - block_requests::build_protobuf_block_request, - chain::Client, - config::ProtocolId, - protocol::message::{BlockAttributes, Direction, FromBlock}, - schema, -}; -use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use libp2p::{ - core::{ - ConnectedPoint, - Multiaddr, - PeerId, - connection::ConnectionId, - upgrade::{InboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, - upgrade::{OutboundUpgrade, read_one, write_one} - }, - swarm::{ - NegotiatedSubstream, - NetworkBehaviour, - NetworkBehaviourAction, - NotifyHandler, - OneShotHandler, - OneShotHandlerConfig, - PollParameters, - SubstreamProtocol, - } -}; -use nohash_hasher::IntMap; -use prost::Message; -use sc_client_api::{ - StorageProof, - light::{ - self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, - RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, - } -}; -use sc_peerset::ReputationChange; -use sp_core::{ - storage::{ChildInfo, ChildType,StorageKey, PrefixedStorageKey}, - hexdisplay::HexDisplay, -}; -use smallvec::SmallVec; -use sp_blockchain::{Error as ClientError}; -use sp_runtime::{ - traits::{Block, Header, NumberFor, Zero}, - generic::BlockId, -}; -use std::{ - collections::{BTreeMap, VecDeque, HashMap}, - iter, - io, - sync::Arc, - time::Duration, - task::{Context, Poll} -}; -use void::Void; -use wasm_timer::Instant; - -/// Reputation change for a peer when a request timed out. -pub(crate) const TIMEOUT_REPUTATION_CHANGE: i32 = -(1 << 8); - -/// Configuration options for `LightClientHandler` behaviour. -#[derive(Debug, Clone)] -pub struct Config { - max_request_size: usize, - max_response_size: usize, - max_pending_requests: usize, - inactivity_timeout: Duration, - request_timeout: Duration, - light_protocol: Bytes, - block_protocol: Bytes, -} - -impl Config { - /// Create a fresh configuration with the following options: - /// - /// - max. request size = 1 MiB - /// - max. response size = 16 MiB - /// - max. pending requests = 128 - /// - inactivity timeout = 15s - /// - request timeout = 15s - pub fn new(id: &ProtocolId) -> Self { - let mut c = Config { - max_request_size: 1 * 1024 * 1024, - max_response_size: 16 * 1024 * 1024, - max_pending_requests: 128, - inactivity_timeout: Duration::from_secs(15), - request_timeout: Duration::from_secs(15), - light_protocol: Bytes::new(), - block_protocol: Bytes::new(), - }; - c.set_protocol(id); - c - } - - /// Limit the max. length in bytes of a request. - pub fn set_max_request_size(&mut self, v: usize) -> &mut Self { - self.max_request_size = v; - self - } - - /// Limit the max. length in bytes of a response. - pub fn set_max_response_size(&mut self, v: usize) -> &mut Self { - self.max_response_size = v; - self - } - - /// Limit the max. number of pending requests. - pub fn set_max_pending_requests(&mut self, v: usize) -> &mut Self { - self.max_pending_requests = v; - self - } - - /// Limit the max. duration the connection may remain inactive before closing it. - pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { - self.inactivity_timeout = v; - self - } - - /// Limit the max. request duration. - pub fn set_request_timeout(&mut self, v: Duration) -> &mut Self { - self.request_timeout = v; - self - } - - /// Set protocol to use for upgrade negotiation. 
- pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { - let mut vl = Vec::new(); - vl.extend_from_slice(b"/"); - vl.extend_from_slice(id.as_ref().as_bytes()); - vl.extend_from_slice(b"/light/2"); - self.light_protocol = vl.into(); - - let mut vb = Vec::new(); - vb.extend_from_slice(b"/"); - vb.extend_from_slice(id.as_ref().as_bytes()); - vb.extend_from_slice(b"/sync/2"); - self.block_protocol = vb.into(); - - self - } -} - -/// Possible errors while handling light clients. -#[derive(Debug, thiserror::Error)] -pub enum Error { - /// There are currently too many pending request. - #[error("too many pending requests")] - TooManyRequests, - /// The response type does not correspond to the issued request. - #[error("unexpected response")] - UnexpectedResponse, - /// A bad request has been received. - #[error("bad request: {0}")] - BadRequest(&'static str), - /// The chain client errored. - #[error("client error: {0}")] - Client(#[from] ClientError), - /// Encoding or decoding of some data failed. - #[error("codec error: {0}")] - Codec(#[from] codec::Error), -} - -/// The possible light client requests we support. -/// -/// The associated `oneshot::Sender` will be used to convey the result of -/// their request back to them (cf. `Reply`). -// -// This is modeled after light_dispatch.rs's `RequestData` which is not -// used because we currently only support a subset of those. -#[derive(Debug)] -pub enum Request { - Body { - request: RemoteBodyRequest, - sender: oneshot::Sender, ClientError>> - }, - Header { - request: light::RemoteHeaderRequest, - sender: oneshot::Sender> - }, - Read { - request: light::RemoteReadRequest, - sender: oneshot::Sender, Option>>, ClientError>> - }, - ReadChild { - request: light::RemoteReadChildRequest, - sender: oneshot::Sender, Option>>, ClientError>> - }, - Call { - request: light::RemoteCallRequest, - sender: oneshot::Sender, ClientError>> - }, - Changes { - request: light::RemoteChangesRequest, - sender: oneshot::Sender, u32)>, ClientError>> - } -} - -/// The data to send back to the light client over the oneshot channel. -// -// It is unified here in order to be able to return it as a function -// result instead of delivering it to the client as a side effect of -// response processing. -#[derive(Debug)] -enum Reply { - VecU8(Vec), - VecNumberU32(Vec<(::Number, u32)>), - MapVecU8OptVecU8(HashMap, Option>>), - Header(B::Header), - Extrinsics(Vec), -} - -/// Augments a light client request with metadata. -#[derive(Debug)] -struct RequestWrapper { - /// Time when this value was created. - timestamp: Instant, - /// Remaining retries. - retries: usize, - /// The actual request. - request: Request, - /// The peer to send the request to, e.g. `PeerId`. - peer: P, - /// The connection to use for sending the request. - connection: Option, -} - -/// Information we have about some peer. -#[derive(Debug)] -struct PeerInfo { - connections: SmallVec<[(ConnectionId, Multiaddr); crate::MAX_CONNECTIONS_PER_PEER]>, - best_block: Option>, - status: PeerStatus, -} - -impl Default for PeerInfo { - fn default() -> Self { - PeerInfo { - connections: SmallVec::new(), - best_block: None, - status: PeerStatus::Idle, - } - } -} - -type RequestId = u64; - -/// A peer is either idle or busy processing a request from us. -#[derive(Debug, Clone, PartialEq, Eq)] -enum PeerStatus { - /// The peer is available. - Idle, - /// We wait for the peer to return us a response for the given request ID. - BusyWith(RequestId), -} - -/// The light client handler behaviour. 
-pub struct LightClientHandler { - /// This behaviour's configuration. - config: Config, - /// Blockchain client. - chain: Arc>, - /// Verifies that received responses are correct. - checker: Arc>, - /// Peer information (addresses, their best block, etc.) - peers: HashMap>, - /// Futures sending back response to remote clients. - responses: FuturesUnordered>, - /// Pending (local) requests. - pending_requests: VecDeque>, - /// Requests on their way to remote peers. - outstanding: IntMap>, - /// (Local) Request ID counter - next_request_id: RequestId, - /// Handle to use for reporting misbehaviour of peers. - peerset: sc_peerset::PeersetHandle, -} - -impl LightClientHandler -where - B: Block, -{ - /// Construct a new light client handler. - pub fn new( - cfg: Config, - chain: Arc>, - checker: Arc>, - peerset: sc_peerset::PeersetHandle, - ) -> Self { - LightClientHandler { - config: cfg, - chain, - checker, - peers: HashMap::new(), - responses: FuturesUnordered::new(), - pending_requests: VecDeque::new(), - outstanding: IntMap::default(), - next_request_id: 1, - peerset, - } - } - - /// We rely on external information about peers best blocks as we lack the - /// means to determine it ourselves. - pub fn update_best_block(&mut self, peer: &PeerId, num: NumberFor) { - if let Some(info) = self.peers.get_mut(peer) { - log::trace!("new best block for {:?}: {:?}", peer, num); - info.best_block = Some(num) - } - } - - /// Issue a new light client request. - pub fn request(&mut self, req: Request) -> Result<(), Error> { - if self.pending_requests.len() >= self.config.max_pending_requests { - return Err(Error::TooManyRequests) - } - let rw = RequestWrapper { - timestamp: Instant::now(), - retries: retries(&req), - request: req, - peer: (), // we do not know the peer yet - connection: None, - }; - self.pending_requests.push_back(rw); - Ok(()) - } - - fn next_request_id(&mut self) -> RequestId { - let id = self.next_request_id; - self.next_request_id += 1; - id - } - - /// Remove the given peer. - /// - /// If we have a request to this peer in flight, we move it back to - /// the pending requests queue. - fn remove_peer(&mut self, peer: &PeerId) { - if let Some(id) = self.outstanding.iter().find(|(_, rw)| &rw.peer == peer).map(|(k, _)| *k) { - let rw = self.outstanding.remove(&id).expect("key belongs to entry in this map"); - let rw = RequestWrapper { - timestamp: rw.timestamp, - retries: rw.retries, - request: rw.request, - peer: (), // need to find another peer - connection: None, - }; - self.pending_requests.push_back(rw); - } - self.peers.remove(peer); - } - - /// Prepares a request by selecting a suitable peer and connection to send it to. - /// - /// If there is currently no suitable peer for the request, the given request - /// is returned as `Err`. 
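// Each `Request` variant above carries a `oneshot::Sender` over which the checked result is
// eventually handed back to whoever issued the request (see `send_reply` further below). A
// minimal, self-contained sketch of that request-with-reply-channel pattern; `Job`, `worker`
// and the doubling computation are illustrative only.

use futures::channel::oneshot;
use futures::executor::block_on;

/// A request bundled with the channel used to send its result back.
struct Job {
    input: u32,
    reply: oneshot::Sender<Result<u32, String>>,
}

/// Process a queue of jobs, answering each one over its own reply channel.
fn worker(jobs: Vec<Job>) {
    for job in jobs {
        let result = job.input.checked_mul(2).ok_or_else(|| "overflow".to_string());
        // It is fine if the requester has already given up and dropped the receiver.
        let _ = job.reply.send(result);
    }
}

fn main() {
    let (reply, receiver) = oneshot::channel();
    worker(vec![Job { input: 21, reply }]);
    // The requester awaits its own receiver to obtain the answer (here: Ok(42)).
    assert_eq!(block_on(receiver).unwrap(), Ok(42));
}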
- fn prepare_request(&self, req: RequestWrapper) - -> Result<(PeerId, RequestWrapper), RequestWrapper> - { - let number = required_block(&req.request); - - let mut peer = None; - for (peer_id, peer_info) in self.peers.iter() { - if peer_info.status == PeerStatus::Idle { - match peer_info.best_block { - Some(n) => if n >= number { - peer = Some((peer_id, peer_info)); - break - }, - None => peer = Some((peer_id, peer_info)) - } - } - } - - if let Some((peer_id, peer_info)) = peer { - let connection = peer_info.connections.iter().next().map(|(id, _)| *id); - let rw = RequestWrapper { - timestamp: req.timestamp, - retries: req.retries, - request: req.request, - peer: peer_id.clone(), - connection, - }; - Ok((peer_id.clone(), rw)) - } else { - Err(req) - } - } - - /// Process a local request's response from remote. - /// - /// If successful, this will give us the actual, checked data we should be - /// sending back to the client, otherwise an error. - fn on_response - ( &mut self - , peer: &PeerId - , request: &Request - , response: Response - ) -> Result, Error> - { - log::trace!("response from {}", peer); - match response { - Response::Light(r) => self.on_response_light(peer, request, r), - Response::Block(r) => self.on_response_block(peer, request, r), - } - } - - fn on_response_light - ( &mut self - , peer: &PeerId - , request: &Request - , response: schema::v1::light::Response - ) -> Result, Error> - { - use schema::v1::light::response::Response; - match response.response { - Some(Response::RemoteCallResponse(response)) => - if let Request::Call { request , .. } = request { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_execution_proof(request, proof)?; - Ok(Reply::VecU8(reply)) - } else { - Err(Error::UnexpectedResponse) - } - Some(Response::RemoteReadResponse(response)) => - match request { - Request::Read { request, .. } => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - } - Request::ReadChild { request, .. } => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_child_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - } - _ => Err(Error::UnexpectedResponse) - } - Some(Response::RemoteChangesResponse(response)) => - if let Request::Changes { request, .. } = request { - let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?; - let roots = { - let mut r = BTreeMap::new(); - for pair in response.roots { - let k = Decode::decode(&mut pair.fst.as_ref())?; - let v = Decode::decode(&mut pair.snd.as_ref())?; - r.insert(k, v); - } - r - }; - let reply = self.checker.check_changes_proof(&request, light::ChangesProof { - max_block, - proof: response.proof, - roots, - roots_proof, - })?; - Ok(Reply::VecNumberU32(reply)) - } else { - Err(Error::UnexpectedResponse) - } - Some(Response::RemoteHeaderResponse(response)) => - if let Request::Header { request, .. } = request { - let header = - if response.header.is_empty() { - None - } else { - Some(Decode::decode(&mut response.header.as_ref())?) 
- }; - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_header_proof(&request, header, proof)?; - Ok(Reply::Header(reply)) - } else { - Err(Error::UnexpectedResponse) - } - None => Err(Error::UnexpectedResponse) - } - } - - fn on_response_block - ( &mut self - , peer: &PeerId - , request: &Request - , response: schema::v1::BlockResponse - ) -> Result, Error> - { - let request = if let Request::Body { request , .. } = &request { - request - } else { - return Err(Error::UnexpectedResponse); - }; - - let body: Vec<_> = match response.blocks.into_iter().next() { - Some(b) => b.body, - None => return Err(Error::UnexpectedResponse), - }; - - let body = body.into_iter() - .map(|mut extrinsic| B::Extrinsic::decode(&mut &extrinsic[..])) - .collect::>()?; - - let body = self.checker.check_body_proof(&request, body)?; - Ok(Reply::Extrinsics(body)) - } - - fn on_remote_call_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::light::RemoteCallRequest - ) -> Result - { - log::trace!("remote call request from {} ({} at {:?})", - peer, - request.method, - request.block, - ); - - let block = Decode::decode(&mut request.block.as_ref())?; - - let proof = match self.chain.execution_proof(&BlockId::Hash(block), &request.method, &request.data) { - Ok((_, proof)) => proof, - Err(e) => { - log::trace!("remote call request from {} ({} at {:?}) failed with: {}", - peer, - request.method, - request.block, - e, - ); - StorageProof::empty() - } - }; - - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteCallResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } - - fn on_remote_read_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::light::RemoteReadRequest - ) -> Result - { - if request.keys.is_empty() { - log::debug!("invalid remote read request sent by {}", peer); - return Err(Error::BadRequest("remote read request without keys")) - } - - log::trace!("remote read request from {} ({} at {:?})", - peer, - fmt_keys(request.keys.first(), request.keys.last()), - request.block); - - let block = Decode::decode(&mut request.block.as_ref())?; - - let proof = match self.chain.read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read request from {} ({} at {:?}) failed with: {}", - peer, - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } - }; - - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteReadResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } - - fn on_remote_read_child_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::light::RemoteReadChildRequest - ) -> Result - { - if request.keys.is_empty() { - log::debug!("invalid remote child read request sent by {}", peer); - return Err(Error::BadRequest("remove read child request without keys")) - } - - log::trace!("remote read child request from {} ({} {} at {:?})", - peer, - HexDisplay::from(&request.storage_key), - fmt_keys(request.keys.first(), request.keys.last()), - request.block); - - let block = Decode::decode(&mut request.block.as_ref())?; - - let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); - let child_info = match ChildType::from_prefixed_key(prefixed_key) { - 
Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), - None => Err("Invalid child storage key".into()), - }; - let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( - &BlockId::Hash(block), - &child_info, - &mut request.keys.iter().map(AsRef::as_ref) - )) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", - peer, - HexDisplay::from(&request.storage_key), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } - }; - - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteReadResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } - - fn on_remote_header_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::light::RemoteHeaderRequest - ) -> Result - { - log::trace!("remote header proof request from {} ({:?})", peer, request.block); - - let block = Decode::decode(&mut request.block.as_ref())?; - let (header, proof) = match self.chain.header_proof(&BlockId::Number(block)) { - Ok((header, proof)) => (header.encode(), proof), - Err(error) => { - log::trace!("remote header proof request from {} ({:?}) failed with: {}", - peer, - request.block, - error); - (Default::default(), StorageProof::empty()) - } - }; - - let response = { - let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; - schema::v1::light::response::Response::RemoteHeaderResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } - - fn on_remote_changes_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::light::RemoteChangesRequest - ) -> Result - { - log::trace!("remote changes proof request from {} for key {} ({:?}..{:?})", - peer, - if !request.storage_key.is_empty() { - format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&request.key)) - } else { - HexDisplay::from(&request.key).to_string() - }, - request.first, - request.last); - - let first = Decode::decode(&mut request.first.as_ref())?; - let last = Decode::decode(&mut request.last.as_ref())?; - let min = Decode::decode(&mut request.min.as_ref())?; - let max = Decode::decode(&mut request.max.as_ref())?; - let key = StorageKey(request.key.clone()); - let storage_key = if request.storage_key.is_empty() { - None - } else { - Some(PrefixedStorageKey::new_ref(&request.storage_key)) - }; - - let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key, &key) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}", - peer, - format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)), - request.first, - request.last, - error); - - light::ChangesProof:: { - max_block: Zero::zero(), - proof: Vec::new(), - roots: BTreeMap::new(), - roots_proof: StorageProof::empty(), - } - } - }; - - let response = { - let r = schema::v1::light::RemoteChangesResponse { - max: proof.max_block.encode(), - proof: proof.proof, - roots: proof.roots.into_iter() - .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) - .collect(), - roots_proof: proof.roots_proof.encode(), - }; - schema::v1::light::response::Response::RemoteChangesResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } -} - -impl NetworkBehaviour for 
LightClientHandler -where - B: Block -{ - type ProtocolsHandler = OneShotHandler>; - type OutEvent = Void; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - let p = InboundProtocol { - max_request_size: self.config.max_request_size, - protocol: self.config.light_protocol.clone(), - }; - let mut cfg = OneShotHandlerConfig::default(); - cfg.keep_alive_timeout = self.config.inactivity_timeout; - OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) - } - - fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec { - self.peers.get(peer) - .map(|info| info.connections.iter().map(|(_, a)| a.clone()).collect()) - .unwrap_or_default() - } - - fn inject_connected(&mut self, peer: &PeerId) { - } - - fn inject_connection_established(&mut self, peer: &PeerId, conn: &ConnectionId, info: &ConnectedPoint) { - let peer_address = match info { - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr.clone(), - ConnectedPoint::Dialer { address } => address.clone() - }; - - log::trace!("peer {} connected with address {}", peer, peer_address); - - let entry = self.peers.entry(peer.clone()).or_default(); - entry.connections.push((*conn, peer_address)); - } - - fn inject_disconnected(&mut self, peer: &PeerId) { - log::trace!("peer {} disconnected", peer); - self.remove_peer(peer) - } - - fn inject_connection_closed(&mut self, peer: &PeerId, conn: &ConnectionId, info: &ConnectedPoint) { - let peer_address = match info { - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, - ConnectedPoint::Dialer { address } => address - }; - - log::trace!("connection to peer {} closed: {}", peer, peer_address); - - if let Some(info) = self.peers.get_mut(peer) { - info.connections.retain(|(c, _)| c != conn) - } - - // Add any outstanding requests on the closed connection back to the - // pending requests. - if let Some(id) = self.outstanding.iter() - .find(|(_, rw)| &rw.peer == peer && rw.connection == Some(*conn)) // (*) - .map(|(id, _)| *id) - { - let rw = self.outstanding.remove(&id).expect("by (*)"); - let rw = RequestWrapper { - timestamp: rw.timestamp, - retries: rw.retries, - request: rw.request, - peer: (), // need to find another peer - connection: None, - }; - self.pending_requests.push_back(rw); - } - } - - fn inject_event(&mut self, peer: PeerId, conn: ConnectionId, event: Event) { - match event { - // An incoming request from remote has been received. 
- Event::Request(request, mut stream) => { - log::trace!("incoming request from {}", peer); - let result = match &request.request { - Some(schema::v1::light::request::Request::RemoteCallRequest(r)) => - self.on_remote_call_request(&peer, r), - Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => - self.on_remote_read_request(&peer, r), - Some(schema::v1::light::request::Request::RemoteHeaderRequest(r)) => - self.on_remote_header_request(&peer, r), - Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => - self.on_remote_read_child_request(&peer, r), - Some(schema::v1::light::request::Request::RemoteChangesRequest(r)) => - self.on_remote_changes_request(&peer, r), - None => { - log::debug!("ignoring request without request data from peer {}", peer); - return - } - }; - match result { - Ok(response) => { - log::trace!("enqueueing response for peer {}", peer); - let mut data = Vec::new(); - if let Err(e) = response.encode(&mut data) { - log::debug!("error encoding response for peer {}: {}", peer, e) - } else { - let future = async move { - if let Err(e) = write_one(&mut stream, data).await { - log::debug!("error writing response: {}", e) - } - }; - self.responses.push(future.boxed()) - } - } - Err(Error::BadRequest(_)) => { - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new(-(1 << 12), "bad request")) - } - Err(e) => log::debug!("error handling request from peer {}: {}", peer, e) - } - } - // A response to one of our own requests has been received. - Event::Response(id, response) => { - if let Some(request) = self.outstanding.remove(&id) { - // We first just check if the response originates from the expected peer - // and connection. - if request.peer != peer { - log::debug!("Expected response from {} instead of {}.", request.peer, peer); - self.outstanding.insert(id, request); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("response from unexpected peer")); - return - } - - if let Some(info) = self.peers.get_mut(&peer) { - if info.status != PeerStatus::BusyWith(id) { - // If we get here, something is wrong with our internal handling of peer - // status information. At any time, a single peer processes at most one - // request from us and its status should contain the request ID we are - // expecting a response for. If a peer would send us a response with a - // random ID, we should not have an entry for it with this peer ID in - // our `outstanding` map, so a malicious peer should not be able to get - // us here. It is our own fault and must be fixed! - panic!("unexpected peer status {:?} for {}", info.status, peer); - } - - info.status = PeerStatus::Idle; // Make peer available again. 
- - match self.on_response(&peer, &request.request, response) { - Ok(reply) => send_reply(Ok(reply), request.request), - Err(Error::UnexpectedResponse) => { - log::debug!("unexpected response {} from peer {}", id, peer); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("unexpected response from peer")); - let rw = RequestWrapper { - timestamp: request.timestamp, - retries: request.retries, - request: request.request, - peer: (), - connection: None, - }; - self.pending_requests.push_back(rw); - } - Err(other) => { - log::debug!("error handling response {} from peer {}: {}", id, peer, other); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("invalid response from peer")); - if request.retries > 0 { - let rw = RequestWrapper { - timestamp: request.timestamp, - retries: request.retries - 1, - request: request.request, - peer: (), - connection: None, - }; - self.pending_requests.push_back(rw) - } else { - send_reply(Err(ClientError::RemoteFetchFailed), request.request) - } - } - } - } else { - // If we get here, something is wrong with our internal handling of peers. - // We apparently have an entry in our `outstanding` map and the peer is the one we - // expected. So, if we can not find an entry for it in our peer information table, - // then these two collections are out of sync which must not happen and is a clear - // programmer error that must be fixed! - panic!("missing peer information for {}; response {}", peer, id); - } - } else { - log::debug!("unexpected response {} from peer {}", id, peer); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("response from unexpected peer")); - } - } - } - } - - fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) -> Poll> { - // Process response sending futures. - while let Poll::Ready(Some(_)) = self.responses.poll_next_unpin(cx) {} - - // If we have a pending request to send, try to find an available peer and send it. - let now = Instant::now(); - while let Some(mut request) = self.pending_requests.pop_front() { - if now > request.timestamp + self.config.request_timeout { - if request.retries == 0 { - send_reply(Err(ClientError::RemoteFetchFailed), request.request); - continue - } - request.timestamp = Instant::now(); - request.retries -= 1 - } - - - match self.prepare_request(request) { - Err(request) => { - self.pending_requests.push_front(request); - log::debug!("no peer available to send request to"); - break - } - Ok((peer, request)) => { - let request_bytes = match serialize_request(&request.request) { - Ok(bytes) => bytes, - Err(error) => { - log::debug!("failed to serialize request: {}", error); - send_reply(Err(ClientError::RemoteFetchFailed), request.request); - continue - } - }; - - let (expected, protocol) = match request.request { - Request::Body { .. 
} => - (ExpectedResponseTy::Block, self.config.block_protocol.clone()), - _ => - (ExpectedResponseTy::Light, self.config.light_protocol.clone()), - }; - - let peer_id = peer.clone(); - let handler = request.connection.map_or(NotifyHandler::Any, NotifyHandler::One); - - let request_id = self.next_request_id(); - if let Some(p) = self.peers.get_mut(&peer) { - p.status = PeerStatus::BusyWith(request_id); - } - self.outstanding.insert(request_id, request); - - let event = OutboundProtocol { - request_id, - request: request_bytes, - expected, - max_response_size: self.config.max_response_size, - protocol, - }; - - log::trace!("sending request {} to peer {}", request_id, peer_id); - - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler, - event, - }) - } - } - } - - // Look for ongoing requests that have timed out. - let mut expired = Vec::new(); - for (id, rw) in &self.outstanding { - if now > rw.timestamp + self.config.request_timeout { - log::debug!("request {} timed out", id); - expired.push(*id) - } - } - for id in expired { - if let Some(rw) = self.outstanding.remove(&id) { - self.remove_peer(&rw.peer); - self.peerset.report_peer(rw.peer.clone(), - ReputationChange::new(TIMEOUT_REPUTATION_CHANGE, "light request timeout")); - if rw.retries == 0 { - send_reply(Err(ClientError::RemoteFetchFailed), rw.request); - continue - } - let rw = RequestWrapper { - timestamp: Instant::now(), - retries: rw.retries - 1, - request: rw.request, - peer: (), - connection: None, - }; - self.pending_requests.push_back(rw) - } - } - - Poll::Pending - } -} - -fn required_block(request: &Request) -> NumberFor { - match request { - Request::Body { request, .. } => *request.header.number(), - Request::Header { request, .. } => request.block, - Request::Read { request, .. } => *request.header.number(), - Request::ReadChild { request, .. } => *request.header.number(), - Request::Call { request, .. } => *request.header.number(), - Request::Changes { request, .. } => request.max_block.0, - } -} - -fn retries(request: &Request) -> usize { - let rc = match request { - Request::Body { request, .. } => request.retry_count, - Request::Header { request, .. } => request.retry_count, - Request::Read { request, .. } => request.retry_count, - Request::ReadChild { request, .. } => request.retry_count, - Request::Call { request, .. } => request.retry_count, - Request::Changes { request, .. } => request.retry_count, - }; - rc.unwrap_or(0) -} - -fn serialize_request(request: &Request) -> Result, prost::EncodeError> { - let request = match request { - Request::Body { request, .. } => { - let rq = build_protobuf_block_request::<_, NumberFor>( - BlockAttributes::BODY, - FromBlock::Hash(request.header.hash()), - None, - Direction::Ascending, - Some(1), - ); - let mut buf = Vec::with_capacity(rq.encoded_len()); - rq.encode(&mut buf)?; - return Ok(buf); - } - Request::Header { request, .. } => { - let r = schema::v1::light::RemoteHeaderRequest { block: request.block.encode() }; - schema::v1::light::request::Request::RemoteHeaderRequest(r) - } - Request::Read { request, .. } => { - let r = schema::v1::light::RemoteReadRequest { - block: request.block.encode(), - keys: request.keys.clone(), - }; - schema::v1::light::request::Request::RemoteReadRequest(r) - } - Request::ReadChild { request, .. 
} => { - let r = schema::v1::light::RemoteReadChildRequest { - block: request.block.encode(), - storage_key: request.storage_key.clone().into_inner(), - keys: request.keys.clone(), - }; - schema::v1::light::request::Request::RemoteReadChildRequest(r) - } - Request::Call { request, .. } => { - let r = schema::v1::light::RemoteCallRequest { - block: request.block.encode(), - method: request.method.clone(), - data: request.call_data.clone(), - }; - schema::v1::light::request::Request::RemoteCallRequest(r) - } - Request::Changes { request, .. } => { - let r = schema::v1::light::RemoteChangesRequest { - first: request.first_block.1.encode(), - last: request.last_block.1.encode(), - min: request.tries_roots.1.encode(), - max: request.max_block.1.encode(), - storage_key: request.storage_key.clone().map(|s| s.into_inner()) - .unwrap_or_default(), - key: request.key.clone(), - }; - schema::v1::light::request::Request::RemoteChangesRequest(r) - } - }; - - let rq = schema::v1::light::Request { request: Some(request) }; - let mut buf = Vec::with_capacity(rq.encoded_len()); - rq.encode(&mut buf)?; - Ok(buf) -} - -fn send_reply(result: Result, ClientError>, request: Request) { - fn send(item: T, sender: oneshot::Sender) { - let _ = sender.send(item); // It is okay if the other end already hung up. - } - match request { - Request::Body { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::Extrinsics(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request), - } - Request::Header { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::Header(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for header request: {:?}, {:?}", reply, request), - } - Request::Read { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), - } - Request::ReadChild { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for read child request: {:?}, {:?}", reply, request), - } - Request::Call { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::VecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for call request: {:?}, {:?}", reply, request), - } - Request::Changes { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::VecNumberU32(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for changes request: {:?}, {:?}", reply, request), - } - } -} - -/// Output type of inbound and outbound substream upgrades. -#[derive(Debug)] -pub enum Event { - /// Incoming request from remote and substream to use for the response. - Request(schema::v1::light::Request, T), - /// Incoming response from remote. - Response(RequestId, Response), -} - -/// Incoming response from remote. -#[derive(Debug, Clone)] -pub enum Response { - /// Incoming light response from remote. - Light(schema::v1::light::Response), - /// Incoming block response from remote. - Block(schema::v1::BlockResponse), -} - -/// Substream upgrade protocol. -/// -/// Reads incoming requests from remote. -#[derive(Debug, Clone)] -pub struct InboundProtocol { - /// The max. request length in bytes. - max_request_size: usize, - /// The protocol to use for upgrade negotiation. 
- protocol: Bytes, -} - -impl UpgradeInfo for InboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } -} - -impl InboundUpgrade for InboundProtocol -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = Event; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { - let future = async move { - let vec = read_one(&mut s, self.max_request_size).await?; - match schema::v1::light::Request::decode(&vec[..]) { - Ok(r) => Ok(Event::Request(r, s)), - Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) - } - }; - future.boxed() - } -} - -/// Substream upgrade protocol. -/// -/// Sends a request to remote and awaits the response. -#[derive(Debug, Clone)] -pub struct OutboundProtocol { - /// The serialized protobuf request. - request: Vec, - /// Local identifier for the request. Used to associate it with a response. - request_id: RequestId, - /// Kind of response expected for this request. - expected: ExpectedResponseTy, - /// The max. response length in bytes. - max_response_size: usize, - /// The protocol to use for upgrade negotiation. - protocol: Bytes, -} - -/// Type of response expected from the remote for this request. -#[derive(Debug, Clone)] -enum ExpectedResponseTy { - Light, - Block, -} - -impl UpgradeInfo for OutboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } -} - -impl OutboundUpgrade for OutboundProtocol -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = Event; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { - let future = async move { - write_one(&mut s, &self.request).await?; - let vec = read_one(&mut s, self.max_response_size).await?; - - match self.expected { - ExpectedResponseTy::Light => { - schema::v1::light::Response::decode(&vec[..]) - .map(|r| Event::Response(self.request_id, Response::Light(r))) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) - }, - ExpectedResponseTy::Block => { - schema::v1::BlockResponse::decode(&vec[..]) - .map(|r| Event::Response(self.request_id, Response::Block(r))) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) - } - } - }; - future.boxed() - } -} - -fn fmt_keys(first: Option<&Vec>, last: Option<&Vec>) -> String { - if let (Some(first), Some(last)) = (first, last) { - if first == last { - HexDisplay::from(first).to_string() - } else { - format!("{}..{}", HexDisplay::from(first), HexDisplay::from(last)) - } - } else { - String::from("n/a") - } -} - -#[cfg(test)] -mod tests { - use super::*; - use async_std::task; - use assert_matches::assert_matches; - use codec::Encode; - use crate::{ - chain::Client, - config::ProtocolId, - schema, - }; - use futures::{channel::oneshot, prelude::*}; - use libp2p::{ - PeerId, - Multiaddr, - core::{ - ConnectedPoint, - connection::ConnectionId, - identity, - muxing::{StreamMuxerBox, SubstreamRef}, - transport::{Transport, Boxed, memory::MemoryTransport}, - upgrade - }, - noise::{self, Keypair, X25519, NoiseConfig}, - swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, - yamux - }; - use sc_client_api::{StorageProof, RemoteReadChildRequest, FetchChecker}; - use 
sp_blockchain::{Error as ClientError}; - use sp_core::storage::ChildInfo; - use std::{ - collections::{HashMap, HashSet}, - io, - iter::{self, FromIterator}, - pin::Pin, - sync::Arc, - task::{Context, Poll} - }; - use sp_runtime::{generic::Header, traits::{BlakeTwo256, Block as BlockT, NumberFor}}; - use super::{Event, LightClientHandler, Request, Response, OutboundProtocol, PeerStatus}; - use void::Void; - - type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; - type Handler = LightClientHandler; - type Swarm = libp2p::swarm::Swarm; - - fn empty_proof() -> Vec { - StorageProof::empty().encode() - } - - fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { - let client = Arc::new(substrate_test_runtime_client::new()); - let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData }); - let id_key = identity::Keypair::generate_ed25519(); - let dh_key = Keypair::::new().into_authentic(&id_key).unwrap(); - let local_peer = id_key.public().into_peer_id(); - let transport = MemoryTransport::default() - .upgrade(upgrade::Version::V1) - .authenticate(NoiseConfig::xx(dh_key).into_authenticated()) - .multiplex(yamux::Config::default()) - .boxed(); - Swarm::new(transport, LightClientHandler::new(cf, client, checker, ps), local_peer) - } - - struct DummyFetchChecker { - ok: bool, - _mark: std::marker::PhantomData - } - - impl light::FetchChecker for DummyFetchChecker { - fn check_header_proof( - &self, - _request: &RemoteHeaderRequest, - header: Option, - _remote_proof: StorageProof, - ) -> Result { - match self.ok { - true if header.is_some() => Ok(header.unwrap()), - _ => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_proof( - &self, - request: &RemoteReadRequest, - _: StorageProof, - ) -> Result, Option>>, ClientError> { - match self.ok { - true => Ok(request.keys - .iter() - .cloned() - .map(|k| (k, Some(vec![42]))) - .collect() - ), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_child_proof( - &self, - request: &RemoteReadChildRequest, - _: StorageProof, - ) -> Result, Option>>, ClientError> { - match self.ok { - true => Ok(request.keys - .iter() - .cloned() - .map(|k| (k, Some(vec![42]))) - .collect() - ), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_execution_proof( - &self, - _: &RemoteCallRequest, - _: StorageProof, - ) -> Result, ClientError> { - match self.ok { - true => Ok(vec![42]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_changes_proof( - &self, - _: &RemoteChangesRequest, - _: ChangesProof - ) -> Result, u32)>, ClientError> { - match self.ok { - true => Ok(vec![(100u32.into(), 2)]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_body_proof( - &self, - _: &RemoteBodyRequest, - body: Vec - ) -> Result, ClientError> { - match self.ok { - true => Ok(body), - false => Err(ClientError::Backend("Test error".into())), - } - } - } - - fn make_config() -> super::Config { - super::Config::new(&ProtocolId::from("foo")) - } - - fn dummy_header() -> sp_test_primitives::Header { - sp_test_primitives::Header { - parent_hash: Default::default(), - number: 0, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - - struct EmptyPollParams(PeerId); - - impl PollParameters for EmptyPollParams { - type SupportedProtocolsIter = iter::Empty>; - type ListenedAddressesIter = iter::Empty; - type 
ExternalAddressesIter = iter::Empty; - - fn supported_protocols(&self) -> Self::SupportedProtocolsIter { - iter::empty() - } - - fn listened_addresses(&self) -> Self::ListenedAddressesIter { - iter::empty() - } - - fn external_addresses(&self) -> Self::ExternalAddressesIter { - iter::empty() - } - - fn local_peer_id(&self) -> &PeerId { - &self.0 - } - } - - fn peerset() -> (sc_peerset::Peerset, sc_peerset::PeersetHandle) { - let cfg = sc_peerset::PeersetConfig { - in_peers: 128, - out_peers: 128, - bootnodes: Vec::new(), - reserved_only: false, - priority_groups: Vec::new(), - }; - sc_peerset::Peerset::from_config(cfg) - } - - fn make_behaviour - ( ok: bool - , ps: sc_peerset::PeersetHandle - , cf: super::Config - ) -> LightClientHandler - { - let client = Arc::new(substrate_test_runtime_client::new()); - let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData }); - LightClientHandler::new(cf, client, checker, ps) - } - - fn empty_dialer() -> ConnectedPoint { - ConnectedPoint::Dialer { address: Multiaddr::empty() } - } - - fn poll(mut b: &mut LightClientHandler) -> Poll> { - let mut p = EmptyPollParams(PeerId::random()); - match future::poll_fn(|cx| Pin::new(&mut b).poll(cx, &mut p)).now_or_never() { - Some(a) => Poll::Ready(a), - None => Poll::Pending - } - } - - #[test] - fn disconnects_from_peer_if_told() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - behaviour.inject_connection_established(&peer, &ConnectionId::new(1), &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - behaviour.inject_connection_closed(&peer, &ConnectionId::new(1), &empty_dialer()); - behaviour.inject_disconnected(&peer); - assert_eq!(0, behaviour.peers.len()) - } - - #[test] - fn disconnects_from_peer_if_request_times_out() { - let peer0 = PeerId::random(); - let peer1 = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - behaviour.inject_connection_established(&peer0, &ConnectionId::new(1), &empty_dialer()); - behaviour.inject_connected(&peer0); - behaviour.inject_connection_established(&peer1, &ConnectionId::new(2), &empty_dialer()); - behaviour.inject_connected(&peer1); - - // We now know about two peers. - assert_eq!(HashSet::from_iter(&[peer0.clone(), peer1.clone()]), behaviour.peers.keys().collect::>()); - - // No requests have been made yet. - assert!(behaviour.pending_requests.is_empty()); - assert!(behaviour.outstanding.is_empty()); - - // Issue our first request! - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - assert_eq!(1, behaviour.pending_requests.len()); - - // The behaviour should now attempt to send the request. - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, .. }) => { - assert!(peer_id == peer0 || peer_id == peer1) - }); - - // And we should have one busy peer. - assert!({ - let (idle, busy): (Vec<_>, Vec<_>) = - behaviour.peers.iter().partition(|(_, info)| info.status == PeerStatus::Idle); - - idle.len() == 1 && busy.len() == 1 - && (idle[0].0 == &peer0 || busy[0].0 == &peer0) - && (idle[0].0 == &peer1 || busy[0].0 == &peer1) - }); - - // No more pending requests, but one should be outstanding. 
- assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - // We now set back the timestamp of the outstanding request to make it expire. - let request = behaviour.outstanding.values_mut().next().unwrap(); - request.timestamp -= make_config().request_timeout; - - // Make progress, but do not expect some action. - assert_matches!(poll(&mut behaviour), Poll::Pending); - - // The request should have timed out by now and the corresponding peer be removed. - assert_eq!(1, behaviour.peers.len()); - // Since we asked for one retry, the request should be back in the pending queue. - assert_eq!(1, behaviour.pending_requests.len()); - // No other request should be ongoing. - assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn disconnects_from_peer_on_incorrect_response() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(false, pset.1, make_config()); - // ^--- Making sure the response data check fails. - - let conn = ConnectionId::new(1); - behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - poll(&mut behaviour); // Make progress - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - let request_id = *behaviour.outstanding.keys().next().unwrap(); - - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - } - }; - - behaviour.inject_event(peer.clone(), conn, Event::Response(request_id, Response::Light(response))); - assert!(behaviour.peers.is_empty()); - - poll(&mut behaviour); // More progress - - // The request should be back in the pending queue - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn disconnects_from_peer_on_unexpected_response() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - let conn = ConnectionId::new(1); - behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - - // Some unsolicited response - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - } - }; - - behaviour.inject_event(peer.clone(), conn, Event::Response(2347895932, Response::Light(response))); - - assert!(behaviour.peers.is_empty()); - poll(&mut behaviour); - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn disconnects_from_peer_on_wrong_response_type() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - let conn = ConnectionId::new(1); - 
behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - poll(&mut behaviour); // Make progress - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - let request_id = *behaviour.outstanding.keys().next().unwrap(); - - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; // Not a RemoteCallResponse! - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - } - }; - - behaviour.inject_event(peer.clone(), conn, Event::Response(request_id, Response::Light(response))); - assert!(behaviour.peers.is_empty()); - - poll(&mut behaviour); // More progress - - // The request should be back in the pending queue - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn receives_remote_failure_after_retry_count_failures() { - let peer1 = PeerId::random(); - let peer2 = PeerId::random(); - let peer3 = PeerId::random(); - let peer4 = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(false, pset.1, make_config()); - // ^--- Making sure the response data check fails. - - let conn1 = ConnectionId::new(1); - behaviour.inject_connection_established(&peer1, &conn1, &empty_dialer()); - behaviour.inject_connected(&peer1); - let conn2 = ConnectionId::new(2); - behaviour.inject_connection_established(&peer2, &conn2, &empty_dialer()); - behaviour.inject_connected(&peer2); - let conn3 = ConnectionId::new(3); - behaviour.inject_connection_established(&peer3, &conn3, &empty_dialer()); - behaviour.inject_connected(&peer3); - let conn4 = ConnectionId::new(3); - behaviour.inject_connection_established(&peer4, &conn4, &empty_dialer()); - behaviour.inject_connected(&peer4); - assert_eq!(4, behaviour.peers.len()); - - let mut chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(3), // Attempt up to three retries. - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. 
})); - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - for i in 1 ..= 3 { - // Construct an invalid response - let request_id = *behaviour.outstanding.keys().next().unwrap(); - let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone(); - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)) - } - }; - let conn = ConnectionId::new(i); - behaviour.inject_event(responding_peer, conn, Event::Response(request_id, Response::Light(response.clone()))); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. })); - assert_matches!(chan.1.try_recv(), Ok(None)) - } - // Final invalid response - let request_id = *behaviour.outstanding.keys().next().unwrap(); - let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone(); - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - } - }; - behaviour.inject_event(responding_peer, conn4, Event::Response(request_id, Response::Light(response))); - assert_matches!(poll(&mut behaviour), Poll::Pending); - assert_matches!(chan.1.try_recv(), Ok(Some(Err(ClientError::RemoteFetchFailed)))) - } - - fn issue_request(request: Request) { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - let conn = ConnectionId::new(1); - behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - let response = match request { - Request::Body { .. } => unimplemented!(), - Request::Header{..} => { - let r = schema::v1::light::RemoteHeaderResponse { - header: dummy_header().encode(), - proof: empty_proof() - }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteHeaderResponse(r)), - } - } - Request::Read{..} => { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - } - } - Request::ReadChild{..} => { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - } - } - Request::Call{..} => { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - } - } - Request::Changes{..} => { - let r = schema::v1::light::RemoteChangesResponse { - max: iter::repeat(1).take(32).collect(), - proof: Vec::new(), - roots: Vec::new(), - roots_proof: empty_proof() - }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteChangesResponse(r)), - } - } - }; - - behaviour.request(request).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. 
})); - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - assert_eq!(1, *behaviour.outstanding.keys().next().unwrap()); - - behaviour.inject_event(peer.clone(), conn, Event::Response(1, Response::Light(response))); - - poll(&mut behaviour); - - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()) - } - - #[test] - fn receives_remote_call_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }; - issue_request(Request::Call { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_read_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - issue_request(Request::Read { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_read_child_response() { - let mut chan = oneshot::channel(); - let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); - let request = light::RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: child_info.prefixed_storage_key(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - issue_request(Request::ReadChild { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_header_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }; - issue_request(Request::Header { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_changes_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteChangesRequest { - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), - }], - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), - tries_roots: (1, Default::default(), Vec::new()), - key: Vec::new(), - storage_key: None, - retry_count: None, - }; - issue_request(Request::Changes { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - fn send_receive(request: Request) { - // We start a swarm on the listening side which awaits incoming requests and answers them: - let local_pset = peerset(); - let local_listen_addr: libp2p::Multiaddr = libp2p::multiaddr::Protocol::Memory(rand::random()).into(); - let mut local_swarm = make_swarm(true, local_pset.1, make_config()); - Swarm::listen_on(&mut local_swarm, local_listen_addr.clone()).unwrap(); - - // We also start a swarm that makes requests and awaits responses: - let remote_pset = peerset(); - let mut remote_swarm = make_swarm(true, remote_pset.1, make_config()); - - // We now schedule a request, dial the remote and let the two swarm work it out: - remote_swarm.request(request).unwrap(); - Swarm::dial_addr(&mut remote_swarm, local_listen_addr).unwrap(); - - let future = { - let a = local_swarm.for_each(|_| future::ready(())); - let b = remote_swarm.for_each(|_| future::ready(())); - 
future::join(a, b).map(|_| ()) - }; - - task::spawn(future); - } - - #[test] - fn send_receive_call() { - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }; - send_receive(Request::Call { request, sender: chan.0 }); - assert_eq!(vec![42], task::block_on(chan.1).unwrap().unwrap()); - // ^--- from `DummyFetchChecker::check_execution_proof` - } - - #[test] - fn send_receive_read() { - let chan = oneshot::channel(); - let request = light::RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - keys: vec![b":key".to_vec()], - retry_count: None - }; - send_receive(Request::Read { request, sender: chan.0 }); - assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); - // ^--- from `DummyFetchChecker::check_read_proof` - } - - #[test] - fn send_receive_read_child() { - let chan = oneshot::channel(); - let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); - let request = light::RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: child_info.prefixed_storage_key(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - send_receive(Request::ReadChild { request, sender: chan.0 }); - assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); - // ^--- from `DummyFetchChecker::check_read_child_proof` - } - - #[test] - fn send_receive_header() { - sp_tracing::try_init_simple(); - let chan = oneshot::channel(); - let request = light::RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }; - send_receive(Request::Header { request, sender: chan.0 }); - // The remote does not know block 1: - assert_matches!(task::block_on(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed)); - } - - #[test] - fn send_receive_changes() { - let chan = oneshot::channel(); - let request = light::RemoteChangesRequest { - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), - }], - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), - tries_roots: (1, Default::default(), Vec::new()), - key: Vec::new(), - storage_key: None, - retry_count: None, - }; - send_receive(Request::Changes { request, sender: chan.0 }); - assert_eq!(vec![(100, 2)], task::block_on(chan.1).unwrap().unwrap()); - // ^--- from `DummyFetchChecker::check_changes_proof` - } - - #[test] - fn body_request_fields_encoded_properly() { - let (sender, _) = oneshot::channel(); - let serialized_request = serialize_request::(&Request::Body { - request: RemoteBodyRequest { - header: dummy_header(), - retry_count: None, - }, - sender, - }).unwrap(); - let deserialized_request = schema::v1::BlockRequest::decode(&serialized_request[..]).unwrap(); - assert!( - BlockAttributes::from_be_u32(deserialized_request.fields) - .unwrap() - .contains(BlockAttributes::BODY) - ); - } -} diff --git a/client/network/src/light_client_requests.rs b/client/network/src/light_client_requests.rs new file mode 100644 index 0000000000000..f859a35f45b24 --- /dev/null +++ b/client/network/src/light_client_requests.rs @@ -0,0 +1,334 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Helpers for outgoing and incoming light client requests. + +/// For outgoing light client requests. +pub mod sender; +/// For incoming light client requests. +pub mod handler; + +use crate::config::ProtocolId; +use crate::request_responses::ProtocolConfig; + +use std::time::Duration; + +/// Generate the light client protocol name from chain specific protocol identifier. +fn generate_protocol_name(protocol_id: &ProtocolId) -> String { + let mut s = String::new(); + s.push_str("/"); + s.push_str(protocol_id.as_ref()); + s.push_str("/light/2"); + s +} + +/// Generates a [`ProtocolConfig`] for the light client request protocol, refusing incoming requests. +pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { + ProtocolConfig { + name: generate_protocol_name(protocol_id).into(), + max_request_size: 1 * 1024 * 1024, + max_response_size: 16 * 1024 * 1024, + request_timeout: Duration::from_secs(15), + inbound_queue: None, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::request_responses::IncomingRequest; + use crate::config::ProtocolId; + + use assert_matches::assert_matches; + use futures::executor::{block_on, LocalPool}; + use futures::task::Spawn; + use futures::{channel::oneshot, prelude::*}; + use libp2p::PeerId; + use sc_client_api::StorageProof; + use sc_client_api::light::{RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest}; + use sc_client_api::light::{self, RemoteReadRequest, RemoteBodyRequest, ChangesProof}; + use sc_client_api::{FetchChecker, RemoteReadChildRequest}; + use sp_blockchain::Error as ClientError; + use sp_core::storage::ChildInfo; + use sp_runtime::generic::Header; + use sp_runtime::traits::{BlakeTwo256, Block as BlockT, NumberFor}; + use std::collections::HashMap; + use std::sync::Arc; + + pub struct DummyFetchChecker { + pub ok: bool, + pub _mark: std::marker::PhantomData, + } + + impl FetchChecker for DummyFetchChecker { + fn check_header_proof( + &self, + _request: &RemoteHeaderRequest, + header: Option, + _remote_proof: StorageProof, + ) -> Result { + match self.ok { + true if header.is_some() => Ok(header.unwrap()), + _ => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_read_proof( + &self, + request: &RemoteReadRequest, + _: StorageProof, + ) -> Result, Option>>, ClientError> { + match self.ok { + true => Ok(request + .keys + .iter() + .cloned() + .map(|k| (k, Some(vec![42]))) + .collect()), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_read_child_proof( + &self, + request: &RemoteReadChildRequest, + _: StorageProof, + ) -> Result, Option>>, ClientError> { + match self.ok { + true => Ok(request + .keys + .iter() + .cloned() + .map(|k| (k, Some(vec![42]))) + .collect()), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_execution_proof( + 
&self, + _: &RemoteCallRequest, + _: StorageProof, + ) -> Result, ClientError> { + match self.ok { + true => Ok(vec![42]), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_changes_proof( + &self, + _: &RemoteChangesRequest, + _: ChangesProof, + ) -> Result, u32)>, ClientError> { + match self.ok { + true => Ok(vec![(100u32.into(), 2)]), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_body_proof( + &self, + _: &RemoteBodyRequest, + body: Vec, + ) -> Result, ClientError> { + match self.ok { + true => Ok(body), + false => Err(ClientError::Backend("Test error".into())), + } + } + } + + pub fn protocol_id() -> ProtocolId { + ProtocolId::from("test") + } + + pub fn peerset() -> (sc_peerset::Peerset, sc_peerset::PeersetHandle) { + let cfg = sc_peerset::SetConfig { + in_peers: 128, + out_peers: 128, + bootnodes: Default::default(), + reserved_only: false, + reserved_nodes: Default::default(), + }; + sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets: vec![cfg] }) + } + + pub fn dummy_header() -> sp_test_primitives::Header { + sp_test_primitives::Header { + parent_hash: Default::default(), + number: 0, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + } + } + + type Block = + sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; + + fn send_receive(request: sender::Request, pool: &LocalPool) { + let client = Arc::new(substrate_test_runtime_client::new()); + let (handler, protocol_config) = handler::LightClientRequestHandler::new(&protocol_id(), client); + pool.spawner().spawn_obj(handler.run().boxed().into()).unwrap(); + + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = sender::LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: true, + _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + sender.inject_connected(PeerId::random()); + + sender.request(request).unwrap(); + let sender::OutEvent::SendRequest { pending_response, request, .. 
} = block_on(sender.next()).unwrap(); + let (tx, rx) = oneshot::channel(); + block_on(protocol_config.inbound_queue.unwrap().send(IncomingRequest { + peer: PeerId::random(), + payload: request, + pending_response: tx, + })).unwrap(); + pool.spawner().spawn_obj(async move { + pending_response.send(Ok(rx.await.unwrap().result.unwrap())).unwrap(); + }.boxed().into()).unwrap(); + + pool.spawner().spawn_obj(sender.for_each(|_| future::ready(())).boxed().into()).unwrap(); + } + + #[test] + fn send_receive_call() { + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: None, + }; + + let mut pool = LocalPool::new(); + send_receive(sender::Request::Call { + request, + sender: chan.0, + }, &pool); + assert_eq!(vec![42], pool.run_until(chan.1).unwrap().unwrap()); + // ^--- from `DummyFetchChecker::check_execution_proof` + } + + #[test] + fn send_receive_read() { + let chan = oneshot::channel(); + let request = light::RemoteReadRequest { + header: dummy_header(), + block: Default::default(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + let mut pool = LocalPool::new(); + send_receive(sender::Request::Read { + request, + sender: chan.0, + }, &pool); + assert_eq!( + Some(vec![42]), + pool.run_until(chan.1) + .unwrap() + .unwrap() + .remove(&b":key"[..]) + .unwrap() + ); + // ^--- from `DummyFetchChecker::check_read_proof` + } + + #[test] + fn send_receive_read_child() { + let chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); + let request = light::RemoteReadChildRequest { + header: dummy_header(), + block: Default::default(), + storage_key: child_info.prefixed_storage_key(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + let mut pool = LocalPool::new(); + send_receive(sender::Request::ReadChild { + request, + sender: chan.0, + }, &pool); + assert_eq!( + Some(vec![42]), + pool.run_until(chan.1) + .unwrap() + .unwrap() + .remove(&b":key"[..]) + .unwrap() + ); + // ^--- from `DummyFetchChecker::check_read_child_proof` + } + + #[test] + fn send_receive_header() { + sp_tracing::try_init_simple(); + let chan = oneshot::channel(); + let request = light::RemoteHeaderRequest { + cht_root: Default::default(), + block: 1, + retry_count: None, + }; + let mut pool = LocalPool::new(); + send_receive(sender::Request::Header { + request, + sender: chan.0, + }, &pool); + // The remote does not know block 1: + assert_matches!( + pool.run_until(chan.1).unwrap(), + Err(ClientError::RemoteFetchFailed) + ); + } + + #[test] + fn send_receive_changes() { + let chan = oneshot::channel(); + let request = light::RemoteChangesRequest { + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), + }], + first_block: (1, Default::default()), + last_block: (100, Default::default()), + max_block: (100, Default::default()), + tries_roots: (1, Default::default(), Vec::new()), + key: Vec::new(), + storage_key: None, + retry_count: None, + }; + let mut pool = LocalPool::new(); + send_receive(sender::Request::Changes { + request, + sender: chan.0, + }, &pool); + assert_eq!(vec![(100, 2)], pool.run_until(chan.1).unwrap().unwrap()); + // ^--- from `DummyFetchChecker::check_changes_proof` + } +} diff --git a/client/network/src/light_client_requests/handler.rs 
b/client/network/src/light_client_requests/handler.rs new file mode 100644 index 0000000000000..08de99a0a5de4 --- /dev/null +++ b/client/network/src/light_client_requests/handler.rs @@ -0,0 +1,399 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Helper for incoming light client requests. +//! +//! Handle (i.e. answer) incoming light client requests from a remote peer received via +//! [`crate::request_responses::RequestResponsesBehaviour`] with [`LightClientRequestHandler`]. + +use codec::{self, Encode, Decode}; +use crate::{ + chain::Client, + config::ProtocolId, + schema, + PeerId, +}; +use crate::request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}; +use futures::{channel::mpsc, prelude::*}; +use prost::Message; +use sc_client_api::{ + StorageProof, + light +}; +use sc_peerset::ReputationChange; +use sp_core::{ + storage::{ChildInfo, ChildType, StorageKey, PrefixedStorageKey}, + hexdisplay::HexDisplay, +}; +use sp_runtime::{ + traits::{Block, Zero}, + generic::BlockId, +}; +use std::{ + collections::{BTreeMap}, + sync::Arc, +}; +use log::debug; + +const LOG_TARGET: &str = "light-client-request-handler"; + +/// Handler for incoming light client requests from a remote peer. +pub struct LightClientRequestHandler { + request_receiver: mpsc::Receiver, + /// Blockchain client. + client: Arc>, +} + +impl LightClientRequestHandler { + /// Create a new [`LightClientRequestHandler`]. + pub fn new( + protocol_id: &ProtocolId, + client: Arc>, + ) -> (Self, ProtocolConfig) { + // For now due to lack of data on light client request handling in production systems, this + // value is chosen to match the block request limit. + let (tx, request_receiver) = mpsc::channel(20); + + let mut protocol_config = super::generate_protocol_config(protocol_id); + protocol_config.inbound_queue = Some(tx); + + (Self { client, request_receiver }, protocol_config) + } + + /// Run [`LightClientRequestHandler`].
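(Editor's aside, not part of the patch: the test harness added later in this PR wires the handler up roughly as sketched below; `client`, `protocol_id` and `spawner` are assumed to be in scope, with `futures::prelude::*` imported.)

let (handler, protocol_config) = LightClientRequestHandler::new(&protocol_id, client);
// `protocol_config.inbound_queue` already points at the handler's receiver; the caller
// registers `protocol_config` with the request-response behaviour so that incoming
// ".../light/2" requests reach the handler, and then drives the handler to completion:
spawner.spawn_obj(handler.run().boxed().into()).expect("spawn light client request handler");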
+ pub async fn run(mut self) { + while let Some(request) = self.request_receiver.next().await { + let IncomingRequest { peer, payload, pending_response } = request; + + match self.handle_request(peer, payload) { + Ok(response_data) => { + let response = OutgoingResponse { result: Ok(response_data), reputation_changes: Vec::new() }; + match pending_response.send(response) { + Ok(()) => debug!( + target: LOG_TARGET, + "Handled light client request from {}.", + peer, + ), + Err(_) => debug!( + target: LOG_TARGET, + "Failed to handle light client request from {}: {}", + peer, HandleRequestError::SendResponse, + ), + }; + } , + Err(e) => { + debug!( + target: LOG_TARGET, + "Failed to handle light client request from {}: {}", + peer, e, + ); + + let reputation_changes = match e { + HandleRequestError::BadRequest(_) => { + vec![ReputationChange::new(-(1 << 12), "bad request")] + } + _ => Vec::new(), + }; + + let response = OutgoingResponse { result: Err(()), reputation_changes }; + if pending_response.send(response).is_err() { + debug!( + target: LOG_TARGET, + "Failed to handle light client request from {}: {}", + peer, HandleRequestError::SendResponse, + ); + }; + }, + } + } + } + + + fn handle_request( + &mut self, + peer: PeerId, + payload: Vec, + ) -> Result, HandleRequestError> { + let request = schema::v1::light::Request::decode(&payload[..])?; + + let response = match &request.request { + Some(schema::v1::light::request::Request::RemoteCallRequest(r)) => + self.on_remote_call_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => + self.on_remote_read_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteHeaderRequest(r)) => + self.on_remote_header_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => + self.on_remote_read_child_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteChangesRequest(r)) => + self.on_remote_changes_request(&peer, r)?, + None => { + return Err(HandleRequestError::BadRequest("Remote request without request data.")); + } + }; + + let mut data = Vec::new(); + response.encode(&mut data)?; + + Ok(data) + } + + fn on_remote_call_request( + &mut self, + peer: &PeerId, + request: &schema::v1::light::RemoteCallRequest, + ) -> Result { + log::trace!( + "Remote call request from {} ({} at {:?}).", + peer, request.method, request.block, + ); + + let block = Decode::decode(&mut request.block.as_ref())?; + + let proof = match self.client.execution_proof( + &BlockId::Hash(block), + &request.method, &request.data, + ) { + Ok((_, proof)) => proof, + Err(e) => { + log::trace!( + "remote call request from {} ({} at {:?}) failed with: {}", + peer, request.method, request.block, e, + ); + StorageProof::empty() + } + }; + + let response = { + let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteCallResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_read_request( + &mut self, + peer: &PeerId, + request: &schema::v1::light::RemoteReadRequest, + ) -> Result { + if request.keys.is_empty() { + log::debug!("Invalid remote read request sent by {}.", peer); + return Err(HandleRequestError::BadRequest("Remote read request without keys.")) + } + + log::trace!( + "Remote read request from {} ({} at {:?}).", + peer, fmt_keys(request.keys.first(), request.keys.last()), request.block, + ); + + let block = Decode::decode(&mut request.block.as_ref())?; + + let proof = match 
self.client.read_proof( + &BlockId::Hash(block), + &mut request.keys.iter().map(AsRef::as_ref), + ) { + Ok(proof) => proof, + Err(error) => { + log::trace!( + "remote read request from {} ({} at {:?}) failed with: {}", + peer, fmt_keys(request.keys.first(), request.keys.last()), request.block, error, + ); + StorageProof::empty() + } + }; + + let response = { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteReadResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_read_child_request( + &mut self, + peer: &PeerId, + request: &schema::v1::light::RemoteReadChildRequest, + ) -> Result { + if request.keys.is_empty() { + log::debug!("Invalid remote child read request sent by {}.", peer); + return Err(HandleRequestError::BadRequest("Remove read child request without keys.")) + } + + log::trace!( + "Remote read child request from {} ({} {} at {:?}).", + peer, + HexDisplay::from(&request.storage_key), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + ); + + let block = Decode::decode(&mut request.block.as_ref())?; + + let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); + let child_info = match ChildType::from_prefixed_key(prefixed_key) { + Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), + None => Err(sp_blockchain::Error::InvalidChildStorageKey), + }; + let proof = match child_info.and_then(|child_info| self.client.read_child_proof( + &BlockId::Hash(block), + &child_info, + &mut request.keys.iter().map(AsRef::as_ref) + )) { + Ok(proof) => proof, + Err(error) => { + log::trace!( + "remote read child request from {} ({} {} at {:?}) failed with: {}", + peer, + HexDisplay::from(&request.storage_key), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error, + ); + StorageProof::empty() + } + }; + + let response = { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteReadResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_header_request( + &mut self, + peer: &PeerId, + request: &schema::v1::light::RemoteHeaderRequest, + ) -> Result { + log::trace!("Remote header proof request from {} ({:?}).", peer, request.block); + + let block = Decode::decode(&mut request.block.as_ref())?; + let (header, proof) = match self.client.header_proof(&BlockId::Number(block)) { + Ok((header, proof)) => (header.encode(), proof), + Err(error) => { + log::trace!( + "Remote header proof request from {} ({:?}) failed with: {}.", + peer, request.block, error + ); + (Default::default(), StorageProof::empty()) + } + }; + + let response = { + let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; + schema::v1::light::response::Response::RemoteHeaderResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_changes_request( + &mut self, + peer: &PeerId, + request: &schema::v1::light::RemoteChangesRequest, + ) -> Result { + log::trace!( + "Remote changes proof request from {} for key {} ({:?}..{:?}).", + peer, + if !request.storage_key.is_empty() { + format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&request.key)) + } else { + HexDisplay::from(&request.key).to_string() + }, + request.first, + request.last, + ); + + let first = Decode::decode(&mut request.first.as_ref())?; + let last = 
Decode::decode(&mut request.last.as_ref())?; + let min = Decode::decode(&mut request.min.as_ref())?; + let max = Decode::decode(&mut request.max.as_ref())?; + let key = StorageKey(request.key.clone()); + let storage_key = if request.storage_key.is_empty() { + None + } else { + Some(PrefixedStorageKey::new_ref(&request.storage_key)) + }; + + let proof = match self.client.key_changes_proof(first, last, min, max, storage_key, &key) { + Ok(proof) => proof, + Err(error) => { + log::trace!( + "Remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}.", + peer, + format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)), + request.first, + request.last, + error, + ); + + light::ChangesProof:: { + max_block: Zero::zero(), + proof: Vec::new(), + roots: BTreeMap::new(), + roots_proof: StorageProof::empty(), + } + } + }; + + let response = { + let r = schema::v1::light::RemoteChangesResponse { + max: proof.max_block.encode(), + proof: proof.proof, + roots: proof.roots.into_iter() + .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) + .collect(), + roots_proof: proof.roots_proof.encode(), + }; + schema::v1::light::response::Response::RemoteChangesResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } +} + +#[derive(derive_more::Display, derive_more::From)] +enum HandleRequestError { + #[display(fmt = "Failed to decode request: {}.", _0)] + DecodeProto(prost::DecodeError), + #[display(fmt = "Failed to encode response: {}.", _0)] + EncodeProto(prost::EncodeError), + #[display(fmt = "Failed to send response.")] + SendResponse, + /// A bad request has been received. + #[display(fmt = "bad request: {}", _0)] + BadRequest(&'static str), + /// Encoding or decoding of some data failed. + #[display(fmt = "codec error: {}", _0)] + Codec(codec::Error), +} + +fn fmt_keys(first: Option<&Vec>, last: Option<&Vec>) -> String { + if let (Some(first), Some(last)) = (first, last) { + if first == last { + HexDisplay::from(first).to_string() + } else { + format!("{}..{}", HexDisplay::from(first), HexDisplay::from(last)) + } + } else { + String::from("n/a") + } +} diff --git a/client/network/src/light_client_requests/sender.rs b/client/network/src/light_client_requests/sender.rs new file mode 100644 index 0000000000000..652f465d6f250 --- /dev/null +++ b/client/network/src/light_client_requests/sender.rs @@ -0,0 +1,1343 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Helper for outgoing light client requests. +//! +//! Call [`LightClientRequestSender::send_request`] to send out light client requests. It will: +//! +//! 1. Build the request. +//! +//! 2. Forward the request to [`crate::request_responses::RequestResponsesBehaviour`] via +//! [`OutEvent::SendRequest`]. +//! +//! 3. 
Wait for the response and forward the response via the [`oneshot::Sender`] provided earlier +//! with [`LightClientRequestSender::send_request`]. + +use codec::{self, Encode, Decode}; +use crate::{ + config::ProtocolId, + protocol::message::{BlockAttributes}, + schema, + PeerId, +}; +use crate::request_responses::{RequestFailure, OutboundFailure}; +use futures::{channel::{oneshot}, future::BoxFuture, prelude::*, stream::FuturesUnordered}; +use prost::Message; +use sc_client_api::{ + light::{ + self, RemoteBodyRequest, + } +}; +use sc_peerset::ReputationChange; +use sp_blockchain::{Error as ClientError}; +use sp_runtime::{ + traits::{Block, Header, NumberFor}, +}; +use std::{ + collections::{BTreeMap, VecDeque, HashMap}, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +mod rep { + use super::*; + + /// Reputation change for a peer when a request timed out. + pub const TIMEOUT: ReputationChange = ReputationChange::new(-(1 << 8), "light client request timeout"); + /// Reputation change for a peer when a request is refused. + pub const REFUSED: ReputationChange = ReputationChange::new(-(1 << 8), "light client request refused"); +} + +/// Configuration options for [`LightClientRequestSender`]. +#[derive(Debug, Clone)] +struct Config { + max_pending_requests: usize, + light_protocol: String, + block_protocol: String, +} + +impl Config { + /// Create a new [`LightClientRequestSender`] configuration. + pub fn new(id: &ProtocolId) -> Self { + Config { + max_pending_requests: 128, + light_protocol: super::generate_protocol_name(id), + block_protocol: crate::block_request_handler::generate_protocol_name(id), + } + } +} + +/// State machine helping to send out light client requests. +pub struct LightClientRequestSender { + /// This behaviour's configuration. + config: Config, + /// Verifies that received responses are correct. + checker: Arc>, + /// Peer information (addresses, their best block, etc.) + peers: HashMap>, + /// Pending (local) requests. + pending_requests: VecDeque>, + /// Requests on their way to remote peers. + sent_requests: FuturesUnordered, Result, RequestFailure>, oneshot::Canceled>), + >>, + /// Handle to use for reporting misbehaviour of peers. + peerset: sc_peerset::PeersetHandle, +} + +/// Augments a pending light client request with metadata. +#[derive(Debug)] +struct PendingRequest { + /// Remaining attempts. + attempts_left: usize, + /// The actual request. + request: Request, +} + +impl PendingRequest { + fn new(req: Request) -> Self { + PendingRequest { + // Number of retries + one for the initial attempt. + attempts_left: req.retries() + 1, + request: req, + } + } + + fn into_sent(self, peer_id: PeerId) -> SentRequest { + SentRequest { + attempts_left: self.attempts_left, + request: self.request, + peer: peer_id, + } + } +} + +/// Augments a light client request with metadata that is currently being send to a remote. +#[derive(Debug)] +struct SentRequest { + /// Remaining attempts. + attempts_left: usize, + /// The actual request. + request: Request, + /// The peer that the request is send to. + peer: PeerId, +} + +impl SentRequest { + fn into_pending(self) -> PendingRequest { + PendingRequest { + attempts_left: self.attempts_left, + request: self.request, + } + } +} + +impl Unpin for LightClientRequestSender {} + +impl LightClientRequestSender +where + B: Block, +{ + /// Construct a new light client handler. 
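(Editor's aside, not part of the patch: a minimal sketch of how the sender is driven, modelled on the `send_receive` test helper in `light_client_requests.rs`; `checker`, `peerset_handle`, `peer_id`, `call_request` and the oneshot `tx` are assumed to exist, with `futures::prelude::*` in scope.)

let mut sender = LightClientRequestSender::<Block>::new(&protocol_id, checker, peerset_handle);
sender.inject_connected(peer_id);
sender.request(Request::Call { request: call_request, sender: tx })?;
// Polling the sender as a `Stream` yields the serialized request bytes plus a oneshot
// channel for the reply:
if let Some(OutEvent::SendRequest { target, request, pending_response, protocol_name }) =
    sender.next().await
{
    // Hand `request` to the request-response behaviour under `protocol_name`, then feed
    // the peer's answer (or failure) back through `pending_response`; the sender checks
    // the proof and completes the caller's oneshot with the verified result.
}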
+ pub fn new( + id: &ProtocolId, + checker: Arc>, + peerset: sc_peerset::PeersetHandle, + ) -> Self { + LightClientRequestSender { + config: Config::new(id), + checker, + peers: Default::default(), + pending_requests: Default::default(), + sent_requests: Default::default(), + peerset, + } + } + + /// We rely on external information about peers best blocks as we lack the + /// means to determine it ourselves. + pub fn update_best_block(&mut self, peer: &PeerId, num: NumberFor) { + if let Some(info) = self.peers.get_mut(peer) { + log::trace!("new best block for {:?}: {:?}", peer, num); + info.best_block = Some(num) + } + } + + /// Issue a new light client request. + pub fn request(&mut self, req: Request) -> Result<(), SendRequestError> { + if self.pending_requests.len() >= self.config.max_pending_requests { + return Err(SendRequestError::TooManyRequests) + } + self.pending_requests.push_back(PendingRequest::new(req)); + Ok(()) + } + + /// Remove the given peer. + /// + /// In-flight requests to the given peer might fail and be retried. See + /// [`::poll_next`]. + fn remove_peer(&mut self, peer: PeerId) { + self.peers.remove(&peer); + } + + /// Process a local request's response from remote. + /// + /// If successful, this will give us the actual, checked data we should be + /// sending back to the client, otherwise an error. + fn on_response( + &mut self, + peer: PeerId, + request: &Request, + response: Response, + ) -> Result, Error> { + log::trace!("response from {}", peer); + match response { + Response::Light(r) => self.on_response_light(request, r), + Response::Block(r) => self.on_response_block(request, r), + } + } + + fn on_response_light( + &mut self, + request: &Request, + response: schema::v1::light::Response, + ) -> Result, Error> { + use schema::v1::light::response::Response; + match response.response { + Some(Response::RemoteCallResponse(response)) => + if let Request::Call { request , .. } = request { + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_execution_proof(request, proof)?; + Ok(Reply::VecU8(reply)) + } else { + Err(Error::UnexpectedResponse) + } + Some(Response::RemoteReadResponse(response)) => + match request { + Request::Read { request, .. } => { + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_read_proof(&request, proof)?; + Ok(Reply::MapVecU8OptVecU8(reply)) + } + Request::ReadChild { request, .. } => { + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_read_child_proof(&request, proof)?; + Ok(Reply::MapVecU8OptVecU8(reply)) + } + _ => Err(Error::UnexpectedResponse) + } + Some(Response::RemoteChangesResponse(response)) => + if let Request::Changes { request, .. } = request { + let max_block = Decode::decode(&mut response.max.as_ref())?; + let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?; + let roots = { + let mut r = BTreeMap::new(); + for pair in response.roots { + let k = Decode::decode(&mut pair.fst.as_ref())?; + let v = Decode::decode(&mut pair.snd.as_ref())?; + r.insert(k, v); + } + r + }; + let reply = self.checker.check_changes_proof(&request, light::ChangesProof { + max_block, + proof: response.proof, + roots, + roots_proof, + })?; + Ok(Reply::VecNumberU32(reply)) + } else { + Err(Error::UnexpectedResponse) + } + Some(Response::RemoteHeaderResponse(response)) => + if let Request::Header { request, .. 
} = request { + let header = + if response.header.is_empty() { + None + } else { + Some(Decode::decode(&mut response.header.as_ref())?) + }; + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_header_proof(&request, header, proof)?; + Ok(Reply::Header(reply)) + } else { + Err(Error::UnexpectedResponse) + } + None => Err(Error::UnexpectedResponse) + } + } + + fn on_response_block( + &mut self, + request: &Request, + response: schema::v1::BlockResponse, + ) -> Result, Error> { + let request = if let Request::Body { request , .. } = &request { + request + } else { + return Err(Error::UnexpectedResponse); + }; + + let body: Vec<_> = match response.blocks.into_iter().next() { + Some(b) => b.body, + None => return Err(Error::UnexpectedResponse), + }; + + let body = body.into_iter() + .map(|extrinsic| B::Extrinsic::decode(&mut &extrinsic[..])) + .collect::>()?; + + let body = self.checker.check_body_proof(&request, body)?; + Ok(Reply::Extrinsics(body)) + } + + /// Signal that the node is connected to the given peer. + pub fn inject_connected(&mut self, peer: PeerId) { + let prev_entry = self.peers.insert(peer, Default::default()); + debug_assert!( + prev_entry.is_none(), + "Expect `inject_connected` to be called for disconnected peer.", + ); + } + + /// Signal that the node disconnected from the given peer. + pub fn inject_disconnected(&mut self, peer: PeerId) { + self.remove_peer(peer) + } +} + + +impl Stream for LightClientRequestSender { + type Item = OutEvent; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + // If we have received responses to previously sent requests, check them and pass them on. + while let Poll::Ready(Some((sent_request, request_result))) = self.sent_requests.poll_next_unpin(cx) { + if let Some(info) = self.peers.get_mut(&sent_request.peer) { + if info.status != PeerStatus::Busy { + // If we get here, something is wrong with our internal handling of peer status + // information. At any time, a single peer processes at most one request from + // us. A malicious peer should not be able to get us here. It is our own fault + // and must be fixed! + panic!("unexpected peer status {:?} for {}", info.status, sent_request.peer); + } + + info.status = PeerStatus::Idle; // Make peer available again. 
+ } + + let request_result = match request_result { + Ok(r) => r, + Err(oneshot::Canceled) => { + log::debug!("Oneshot for request to peer {} was canceled.", sent_request.peer); + self.remove_peer(sent_request.peer); + self.peerset.report_peer(sent_request.peer, ReputationChange::new_fatal("no response from peer")); + self.pending_requests.push_back(sent_request.into_pending()); + continue; + } + }; + + let decoded_request_result = request_result.map(|response| { + if sent_request.request.is_block_request() { + schema::v1::BlockResponse::decode(&response[..]) + .map(|r| Response::Block(r)) + } else { + schema::v1::light::Response::decode(&response[..]) + .map(|r| Response::Light(r)) + } + }); + + let response = match decoded_request_result { + Ok(Ok(response)) => response, + Ok(Err(e)) => { + log::debug!("Failed to decode response from peer {}: {:?}.", sent_request.peer, e); + self.remove_peer(sent_request.peer); + self.peerset.report_peer(sent_request.peer, ReputationChange::new_fatal("invalid response from peer")); + self.pending_requests.push_back(sent_request.into_pending()); + continue; + }, + Err(e) => { + log::debug!("Request to peer {} failed with {:?}.", sent_request.peer, e); + + match e { + RequestFailure::NotConnected => { + self.remove_peer(sent_request.peer); + self.pending_requests.push_back(sent_request.into_pending()); + } + RequestFailure::UnknownProtocol => { + debug_assert!( + false, + "Light client and block request protocol should be known when \ + sending requests.", + ); + } + RequestFailure::Refused => { + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + rep::REFUSED, + ); + self.pending_requests.push_back(sent_request.into_pending()); + } + RequestFailure::Obsolete => { + debug_assert!( + false, + "Can not receive `RequestFailure::Obsolete` after dropping the \ + response receiver.", + ); + self.pending_requests.push_back(sent_request.into_pending()); + } + RequestFailure::Network(OutboundFailure::Timeout) => { + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + rep::TIMEOUT, + ); + self.pending_requests.push_back(sent_request.into_pending()); + }, + RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal( + "peer does not support light client or block request protocol", + ), + ); + self.pending_requests.push_back(sent_request.into_pending()); + } + RequestFailure::Network(OutboundFailure::DialFailure) => { + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal( + "failed to dial peer", + ), + ); + self.pending_requests.push_back(sent_request.into_pending()); + } + RequestFailure::Network(OutboundFailure::ConnectionClosed) => { + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal( + "connection to peer closed", + ), + ); + self.pending_requests.push_back(sent_request.into_pending()); + } + } + + continue; + } + }; + + match self.on_response(sent_request.peer, &sent_request.request, response) { + Ok(reply) => sent_request.request.return_reply(Ok(reply)), + Err(Error::UnexpectedResponse) => { + log::debug!("Unexpected response from peer {}.", sent_request.peer); + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal( + "unexpected response from peer", + ), + ); + 
self.pending_requests.push_back(sent_request.into_pending()); + } + Err(other) => { + log::debug!("error handling response from peer {}: {}", sent_request.peer, other); + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal( + "invalid response from peer", + ), + ); + self.pending_requests.push_back(sent_request.into_pending()) + } + } + } + + // If we have a pending request to send, try to find an available peer and send it. + while let Some(mut pending_request) = self.pending_requests.pop_front() { + if pending_request.attempts_left == 0 { + pending_request.request.return_reply(Err(ClientError::RemoteFetchFailed)); + continue + } + + let protocol = if pending_request.request.is_block_request() { + self.config.block_protocol.clone() + } else { + self.config.light_protocol.clone() + }; + + // Out of all idle peers, find one who's best block is high enough, choose any idle peer + // if none exists. + let mut peer = None; + for (peer_id, peer_info) in self.peers.iter_mut() { + if peer_info.status == PeerStatus::Idle { + match peer_info.best_block { + Some(n) if n >= pending_request.request.required_block() => { + peer = Some((*peer_id, peer_info)); + break + }, + _ => peer = Some((*peer_id, peer_info)) + } + } + } + + // Break in case there is no idle peer. + let (peer_id, peer_info) = match peer { + Some((peer_id, peer_info)) => (peer_id, peer_info), + None => { + self.pending_requests.push_front(pending_request); + log::debug!("No peer available to send request to."); + + break; + } + }; + + let request_bytes = match pending_request.request.serialize_request() { + Ok(bytes) => bytes, + Err(error) => { + log::debug!("failed to serialize request: {}", error); + pending_request.request.return_reply(Err(ClientError::RemoteFetchFailed)); + continue + } + }; + + let (tx, rx) = oneshot::channel(); + + peer_info.status = PeerStatus::Busy; + + pending_request.attempts_left -= 1; + + self.sent_requests.push(async move { + (pending_request.into_sent(peer_id), rx.await) + }.boxed()); + + return Poll::Ready(Some(OutEvent::SendRequest { + target: peer_id, + request: request_bytes, + pending_response: tx, + protocol_name: protocol, + })); + } + + Poll::Pending + } +} + +/// Events returned by [`LightClientRequestSender`]. +#[derive(Debug)] +pub enum OutEvent { + /// Emit a request to be send out on the network e.g. via [`crate::request_responses`]. + SendRequest { + /// The remote peer to send the request to. + target: PeerId, + /// The encoded request. + request: Vec, + /// The [`onehsot::Sender`] channel to pass the response to. + pending_response: oneshot::Sender, RequestFailure>>, + /// The name of the protocol to use to send the request. + protocol_name: String, + } +} + +/// Incoming response from remote. +#[derive(Debug, Clone)] +pub enum Response { + /// Incoming light response from remote. + Light(schema::v1::light::Response), + /// Incoming block response from remote. + Block(schema::v1::BlockResponse), +} + +/// Error returned by [`LightClientRequestSender::request`]. +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum SendRequestError { + /// There are currently too many pending request. + #[display(fmt = "too many pending requests")] + TooManyRequests, +} + +/// Error type to propagate errors internally. +#[derive(Debug, derive_more::Display, derive_more::From)] +enum Error { + /// The response type does not correspond to the issued request. 
+ #[display(fmt = "unexpected response")] + UnexpectedResponse, + /// Encoding or decoding of some data failed. + #[display(fmt = "codec error: {}", _0)] + Codec(codec::Error), + /// The chain client errored. + #[display(fmt = "client error: {}", _0)] + Client(ClientError), +} + +/// The data to send back to the light client over the oneshot channel. +// +// It is unified here in order to be able to return it as a function +// result instead of delivering it to the client as a side effect of +// response processing. +#[derive(Debug)] +enum Reply { + VecU8(Vec), + VecNumberU32(Vec<(::Number, u32)>), + MapVecU8OptVecU8(HashMap, Option>>), + Header(B::Header), + Extrinsics(Vec), +} + + +/// Information we have about some peer. +#[derive(Debug)] +struct PeerInfo { + best_block: Option>, + status: PeerStatus, +} + +impl Default for PeerInfo { + fn default() -> Self { + PeerInfo { + best_block: None, + status: PeerStatus::Idle, + } + } +} + +/// A peer is either idle or busy processing a request from us. +#[derive(Debug, Clone, PartialEq, Eq)] +enum PeerStatus { + /// The peer is available. + Idle, + /// We wait for the peer to return us a response for the given request ID. + Busy, +} + +/// The possible light client requests we support. +/// +/// The associated `oneshot::Sender` will be used to convey the result of +/// their request back to them (cf. `Reply`). +// +// This is modeled after light_dispatch.rs's `RequestData` which is not +// used because we currently only support a subset of those. +#[derive(Debug)] +pub enum Request { + /// Remote body request. + Body { + /// Request. + request: RemoteBodyRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender, ClientError>> + }, + /// Remote header request. + Header { + /// Request. + request: light::RemoteHeaderRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender> + }, + /// Remote read request. + Read { + /// Request. + request: light::RemoteReadRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender, Option>>, ClientError>> + }, + /// Remote read child request. + ReadChild { + /// Request. + request: light::RemoteReadChildRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender, Option>>, ClientError>> + }, + /// Remote call request. + Call { + /// Request. + request: light::RemoteCallRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender, ClientError>> + }, + /// Remote changes request. + Changes { + /// Request. + request: light::RemoteChangesRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender, u32)>, ClientError>> + } +} + +impl Request { + fn is_block_request(&self) -> bool { + matches!(self, Request::Body { .. }) + } + + fn required_block(&self) -> NumberFor { + match self { + Request::Body { request, .. } => *request.header.number(), + Request::Header { request, .. } => request.block, + Request::Read { request, .. } => *request.header.number(), + Request::ReadChild { request, .. } => *request.header.number(), + Request::Call { request, .. } => *request.header.number(), + Request::Changes { request, .. } => request.max_block.0, + } + } + + fn retries(&self) -> usize { + let rc = match self { + Request::Body { request, .. } => request.retry_count, + Request::Header { request, .. } => request.retry_count, + Request::Read { request, .. } => request.retry_count, + Request::ReadChild { request, .. } => request.retry_count, + Request::Call { request, .. 
} => request.retry_count, + Request::Changes { request, .. } => request.retry_count, + }; + rc.unwrap_or(0) + } + + fn serialize_request(&self) -> Result, prost::EncodeError> { + let request = match self { + Request::Body { request, .. } => { + let rq = schema::v1::BlockRequest { + fields: BlockAttributes::BODY.to_be_u32(), + from_block: Some(schema::v1::block_request::FromBlock::Hash( + request.header.hash().encode(), + )), + to_block: Default::default(), + direction: schema::v1::Direction::Ascending as i32, + max_blocks: 1, + }; + + let mut buf = Vec::with_capacity(rq.encoded_len()); + rq.encode(&mut buf)?; + return Ok(buf); + } + Request::Header { request, .. } => { + let r = schema::v1::light::RemoteHeaderRequest { block: request.block.encode() }; + schema::v1::light::request::Request::RemoteHeaderRequest(r) + } + Request::Read { request, .. } => { + let r = schema::v1::light::RemoteReadRequest { + block: request.block.encode(), + keys: request.keys.clone(), + }; + schema::v1::light::request::Request::RemoteReadRequest(r) + } + Request::ReadChild { request, .. } => { + let r = schema::v1::light::RemoteReadChildRequest { + block: request.block.encode(), + storage_key: request.storage_key.clone().into_inner(), + keys: request.keys.clone(), + }; + schema::v1::light::request::Request::RemoteReadChildRequest(r) + } + Request::Call { request, .. } => { + let r = schema::v1::light::RemoteCallRequest { + block: request.block.encode(), + method: request.method.clone(), + data: request.call_data.clone(), + }; + schema::v1::light::request::Request::RemoteCallRequest(r) + } + Request::Changes { request, .. } => { + let r = schema::v1::light::RemoteChangesRequest { + first: request.first_block.1.encode(), + last: request.last_block.1.encode(), + min: request.tries_roots.1.encode(), + max: request.max_block.1.encode(), + storage_key: request.storage_key.clone().map(|s| s.into_inner()) + .unwrap_or_default(), + key: request.key.clone(), + }; + schema::v1::light::request::Request::RemoteChangesRequest(r) + } + }; + + let rq = schema::v1::light::Request { request: Some(request) }; + let mut buf = Vec::with_capacity(rq.encoded_len()); + rq.encode(&mut buf)?; + Ok(buf) + } + + fn return_reply(self, result: Result, ClientError>) { + fn send(item: T, sender: oneshot::Sender) { + let _ = sender.send(item); // It is okay if the other end already hung up. 
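// A minimal standalone illustration of why the send error is ignored here, using
// `futures::channel::oneshot` directly (assumes only the `futures` crate):
//
//     let (tx, rx) = futures::channel::oneshot::channel::<u32>();
//     drop(rx);                // the requester gave up and dropped its receiver
//     let _ = tx.send(42);     // returns Err(42); there is nobody left to notify
//
// Propagating that error would achieve nothing, hence the `let _ =` above.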
+ } + match self { + Request::Body { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::Extrinsics(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request), + } + Request::Header { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::Header(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for header request: {:?}, {:?}", reply, request), + } + Request::Read { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), + } + Request::ReadChild { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for read child request: {:?}, {:?}", reply, request), + } + Request::Call { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::VecU8(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for call request: {:?}, {:?}", reply, request), + } + Request::Changes { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::VecNumberU32(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for changes request: {:?}, {:?}", reply, request), + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::light_client_requests::tests::{DummyFetchChecker, protocol_id, peerset, dummy_header}; + use crate::request_responses::OutboundFailure; + + use assert_matches::assert_matches; + use futures::channel::oneshot; + use futures::executor::block_on; + use futures::poll; + use sc_client_api::StorageProof; + use sp_core::storage::ChildInfo; + use sp_runtime::generic::Header; + use sp_runtime::traits::BlakeTwo256; + use std::collections::HashSet; + use std::iter::FromIterator; + + fn empty_proof() -> Vec { + StorageProof::empty().encode() + } + + #[test] + fn removes_peer_if_told() { + let peer = PeerId::random(); + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(DummyFetchChecker { + ok: true, + _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + + sender.inject_connected(peer); + assert_eq!(1, sender.peers.len()); + + sender.inject_disconnected(peer); + assert_eq!(0, sender.peers.len()); + } + + type Block = + sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; + + #[test] + fn body_request_fields_encoded_properly() { + let (sender, _receiver) = oneshot::channel(); + let request = Request::::Body { + request: RemoteBodyRequest { + header: dummy_header(), + retry_count: None, + }, + sender, + }; + let serialized_request = request.serialize_request().unwrap(); + let deserialized_request = schema::v1::BlockRequest::decode(&serialized_request[..]).unwrap(); + assert!(BlockAttributes::from_be_u32(deserialized_request.fields) + .unwrap() + .contains(BlockAttributes::BODY)); + } + + #[test] + fn disconnects_from_peer_if_request_times_out() { + let peer0 = PeerId::random(); + let peer1 = PeerId::random(); + + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: true, + _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + + sender.inject_connected(peer0); + sender.inject_connected(peer1); + + 
assert_eq!( + HashSet::from_iter(&[peer0.clone(), peer1.clone()]), + sender.peers.keys().collect::>(), + "Expect knowledge of two peers." + ); + + assert!(sender.pending_requests.is_empty(), "Expect no pending request."); + assert!(sender.sent_requests.is_empty(), "Expect no sent request."); + + // Issue a request! + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + sender.request(Request::Call { request, sender: chan.0 }).unwrap(); + assert_eq!(1, sender.pending_requests.len(), "Expect one pending request."); + + let OutEvent::SendRequest { target, pending_response, .. } = block_on(sender.next()).unwrap(); + assert!( + target == peer0 || target == peer1, + "Expect request to originate from known peer.", + ); + + // And we should have one busy peer. + assert!({ + let (idle, busy): (Vec<_>, Vec<_>) = sender + .peers + .iter() + .partition(|(_, info)| info.status == PeerStatus::Idle); + idle.len() == 1 + && busy.len() == 1 + && (idle[0].0 == &peer0 || busy[0].0 == &peer0) + && (idle[0].0 == &peer1 || busy[0].0 == &peer1) + }); + + assert_eq!(0, sender.pending_requests.len(), "Expect no pending request."); + assert_eq!(1, sender.sent_requests.len(), "Expect one request to be sent."); + + // Report first attempt as timed out. + pending_response.send(Err(RequestFailure::Network(OutboundFailure::Timeout))).unwrap(); + + // Expect a new request to be issued. + let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); + + assert_eq!(1, sender.peers.len(), "Expect peer to be removed."); + assert_eq!(0, sender.pending_requests.len(), "Expect no request to be pending."); + assert_eq!(1, sender.sent_requests.len(), "Expect new request to be issued."); + + // Report second attempt as timed out. + pending_response.send(Err(RequestFailure::Network(OutboundFailure::Timeout))).unwrap(); + assert_matches!( + block_on(async { poll!(sender.next()) }), Poll::Pending, + "Expect sender to not issue another attempt.", + ); + assert_matches!( + block_on(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed), + "Expect request failure to be reported.", + ); + assert_eq!(0, sender.peers.len(), "Expect no peer to be left"); + assert_eq!(0, sender.pending_requests.len(), "Expect no request to be pending."); + assert_eq!(0, sender.sent_requests.len(), "Expect no other request to be in progress."); + } + + #[test] + fn disconnects_from_peer_on_incorrect_response() { + let peer = PeerId::random(); + + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: false, + // ^--- Making sure the response data check fails. + _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + + sender.inject_connected(peer); + assert_eq!(1, sender.peers.len(), "Expect one peer."); + + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + sender + .request(Request::Call { + request, + sender: chan.0, + }) + .unwrap(); + + assert_eq!(1, sender.pending_requests.len(), "Expect one pending request."); + assert_eq!(0, sender.sent_requests.len(), "Expect zero sent requests."); + + let OutEvent::SendRequest { pending_response, .. 
} = block_on(sender.next()).unwrap(); + assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); + assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); + + let response = { + let r = schema::v1::light::RemoteCallResponse { + proof: empty_proof(), + }; + let response = schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), + }; + let mut data = Vec::new(); + response.encode(&mut data).unwrap(); + data + }; + + pending_response.send(Ok(response)).unwrap(); + + assert_matches!( + block_on(async { poll!(sender.next()) }), Poll::Pending, + "Expect sender to not issue another attempt, given that there is no peer left.", + ); + + assert!(sender.peers.is_empty(), "Expect no peers to be left."); + assert_eq!(1, sender.pending_requests.len(), "Expect request to be pending again."); + assert_eq!(0, sender.sent_requests.len(), "Expect no request to be sent."); + } + + #[test] + fn disconnects_from_peer_on_wrong_response_type() { + let peer = PeerId::random(); + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: true, + _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + + sender.inject_connected(peer); + assert_eq!(1, sender.peers.len(), "Expect one peer."); + + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + sender + .request(Request::Call { + request, + sender: chan.0, + }) + .unwrap(); + + assert_eq!(1, sender.pending_requests.len()); + assert_eq!(0, sender.sent_requests.len()); + let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); + assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); + assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); + + let response = { + let r = schema::v1::light::RemoteReadResponse { + proof: empty_proof(), + }; // Not a RemoteCallResponse! + let response = schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), + }; + let mut data = Vec::new(); + response.encode(&mut data).unwrap(); + data + }; + + pending_response.send(Ok(response)).unwrap(); + assert_matches!( + block_on(async { poll!(sender.next()) }), Poll::Pending, + "Expect sender to not issue another attempt, given that there is no peer left.", + ); + + assert!(sender.peers.is_empty(), "Expect no peers to be left."); + assert_eq!(1, sender.pending_requests.len(), "Expect request to be pending again."); + assert_eq!(0, sender.sent_requests.len(), "Expect no request to be sent."); + } + + #[test] + fn receives_remote_failure_after_retry_count_failures() { + let peers = (0..4).map(|_| PeerId::random()).collect::>(); + + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: false, + // ^--- Making sure the response data check fails. 
+ _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + + for peer in &peers { + sender.inject_connected(*peer); + } + assert_eq!(4, sender.peers.len(), "Expect four peers."); + + let mut chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(3), // Attempt up to three retries. + }; + sender + .request(Request::Call { + request, + sender: chan.0, + }) + .unwrap(); + + assert_eq!(1, sender.pending_requests.len()); + assert_eq!(0, sender.sent_requests.len()); + let mut pending_response = match block_on(sender.next()).unwrap() { + OutEvent::SendRequest { pending_response, .. } => Some(pending_response), + }; + assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); + assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); + + for (i, _peer) in peers.iter().enumerate() { + // Construct an invalid response + let response = { + let r = schema::v1::light::RemoteCallResponse { + proof: empty_proof(), + }; + let response = schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), + }; + let mut data = Vec::new(); + response.encode(&mut data).unwrap(); + data + }; + pending_response.take().unwrap().send(Ok(response)).unwrap(); + + if i < 3 { + pending_response = match block_on(sender.next()).unwrap() { + OutEvent::SendRequest { pending_response, .. } => Some(pending_response), + }; + assert_matches!(chan.1.try_recv(), Ok(None)) + } else { + // Last peer and last attempt. + assert_matches!( + block_on(async { poll!(sender.next()) }), Poll::Pending, + "Expect sender to not issue another attempt, given that there is no peer left.", + ); + assert_matches!( + chan.1.try_recv(), + Ok(Some(Err(ClientError::RemoteFetchFailed))) + ) + } + } + } + + fn issue_request(request: Request) { + let peer = PeerId::random(); + + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: true, + _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + + sender.inject_connected(peer); + assert_eq!(1, sender.peers.len(), "Expect one peer."); + + let response = match request { + Request::Body { .. } => unimplemented!(), + Request::Header { .. } => { + let r = schema::v1::light::RemoteHeaderResponse { + header: dummy_header().encode(), + proof: empty_proof(), + }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteHeaderResponse( + r, + )), + } + } + Request::Read { .. } => { + let r = schema::v1::light::RemoteReadResponse { + proof: empty_proof(), + }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), + } + } + Request::ReadChild { .. } => { + let r = schema::v1::light::RemoteReadResponse { + proof: empty_proof(), + }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), + } + } + Request::Call { .. } => { + let r = schema::v1::light::RemoteCallResponse { + proof: empty_proof(), + }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), + } + } + Request::Changes { .. 
} => { + let r = schema::v1::light::RemoteChangesResponse { + max: std::iter::repeat(1).take(32).collect(), + proof: Vec::new(), + roots: Vec::new(), + roots_proof: empty_proof(), + }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteChangesResponse(r)), + } + } + }; + + let response = { + let mut data = Vec::new(); + response.encode(&mut data).unwrap(); + data + }; + + sender.request(request).unwrap(); + + assert_eq!(1, sender.pending_requests.len()); + assert_eq!(0, sender.sent_requests.len()); + let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); + assert_eq!(0, sender.pending_requests.len()); + assert_eq!(1, sender.sent_requests.len()); + + pending_response.send(Ok(response)).unwrap(); + assert_matches!( + block_on(async { poll!(sender.next()) }), Poll::Pending, + "Expect sender to not issue another attempt, given that there is no peer left.", + ); + + assert_eq!(0, sender.pending_requests.len()); + assert_eq!(0, sender.sent_requests.len()) + } + + #[test] + fn receives_remote_call_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: None, + }; + issue_request(Request::Call { + request, + sender: chan.0, + }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_read_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteReadRequest { + header: dummy_header(), + block: Default::default(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + issue_request(Request::Read { + request, + sender: chan.0, + }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_read_child_response() { + let mut chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); + let request = light::RemoteReadChildRequest { + header: dummy_header(), + block: Default::default(), + storage_key: child_info.prefixed_storage_key(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + issue_request(Request::ReadChild { + request, + sender: chan.0, + }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_header_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteHeaderRequest { + cht_root: Default::default(), + block: 1, + retry_count: None, + }; + issue_request(Request::Header { + request, + sender: chan.0, + }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_changes_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteChangesRequest { + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), + }], + first_block: (1, Default::default()), + last_block: (100, Default::default()), + max_block: (100, Default::default()), + tries_roots: (1, Default::default(), Vec::new()), + key: Vec::new(), + storage_key: None, + retry_count: None, + }; + issue_request(Request::Changes { + request, + sender: chan.0, + }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } +} diff --git a/client/network/src/network_state.rs b/client/network/src/network_state.rs index db2b6429304bb..4ddfadda172e4 100644 --- a/client/network/src/network_state.rs +++ b/client/network/src/network_state.rs @@ -1,6 
+1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,7 +22,6 @@ use libp2p::{core::ConnectedPoint, Multiaddr}; use serde::{Deserialize, Serialize}; -use slog_derive::SerdeValue; use std::{collections::{HashMap, HashSet}, time::Duration}; /// Returns general information about the networking. @@ -30,7 +29,7 @@ use std::{collections::{HashMap, HashSet}, time::Duration}; /// Meant for general diagnostic purposes. /// /// **Warning**: This API is not stable. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, SerdeValue)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct NetworkState { /// PeerId of the local node. @@ -57,12 +56,6 @@ pub struct Peer { pub version_string: Option, /// Latest ping duration with this node. pub latest_ping_time: Option, - /// If true, the peer is "enabled", which means that we try to open Substrate-related protocols - /// with this peer. If false, we stick to Kademlia and/or other network-only protocols. - pub enabled: bool, - /// If true, the peer is "open", which means that we have a Substrate-related protocol - /// with this peer. - pub open: bool, /// List of addresses known for this node. pub known_addresses: HashSet, } diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index 084172ee57c4f..ef8076e8cbed7 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,7 +18,7 @@ //! On-demand requests service. -use crate::light_client_handler; +use crate::light_client_requests; use futures::{channel::oneshot, prelude::*}; use parking_lot::Mutex; @@ -45,10 +45,21 @@ pub struct OnDemand { /// Note that a better alternative would be to use a MPMC queue here, and add a `poll` method /// from the `OnDemand`. However there exists no popular implementation of MPMC channels in /// asynchronous Rust at the moment - requests_queue: Mutex>>>, + requests_queue: Mutex>>>, /// Sending side of `requests_queue`. - requests_send: TracingUnboundedSender>, + requests_send: TracingUnboundedSender>, +} + + +#[derive(Debug, thiserror::Error)] +#[error("AlwaysBadChecker")] +struct ErrorAlwaysBadChecker; + +impl Into for ErrorAlwaysBadChecker { + fn into(self) -> ClientError { + ClientError::Application(Box::new(self)) + } } /// Dummy implementation of `FetchChecker` that always assumes that responses are bad. 
@@ -65,7 +76,7 @@ impl FetchChecker for AlwaysBadChecker { _remote_header: Option, _remote_proof: StorageProof, ) -> Result { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_read_proof( @@ -73,7 +84,7 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteReadRequest, _remote_proof: StorageProof, ) -> Result,Option>>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_read_child_proof( @@ -81,7 +92,7 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteReadChildRequest, _remote_proof: StorageProof, ) -> Result, Option>>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_execution_proof( @@ -89,7 +100,7 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteCallRequest, _remote_proof: StorageProof, ) -> Result, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_changes_proof( @@ -97,7 +108,7 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteChangesRequest, _remote_proof: ChangesProof ) -> Result, u32)>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_body_proof( @@ -105,7 +116,7 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteBodyRequest, _body: Vec ) -> Result, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } } @@ -138,7 +149,7 @@ where /// If this function returns `None`, that means that the receiver has already been extracted in /// the past, and therefore that something already handles the requests. pub(crate) fn extract_receiver(&self) - -> Option>> + -> Option>> { self.requests_queue.lock().take() } @@ -159,7 +170,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::Header { request, sender }); + .unbounded_send(light_client_requests::sender::Request::Header { request, sender }); RemoteResponse { receiver } } @@ -167,7 +178,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::Read { request, sender }); + .unbounded_send(light_client_requests::sender::Request::Read { request, sender }); RemoteResponse { receiver } } @@ -178,7 +189,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::ReadChild { request, sender }); + .unbounded_send(light_client_requests::sender::Request::ReadChild { request, sender }); RemoteResponse { receiver } } @@ -186,7 +197,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::Call { request, sender }); + .unbounded_send(light_client_requests::sender::Request::Call { request, sender }); RemoteResponse { receiver } } @@ -197,7 +208,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::Changes { request, sender }); + .unbounded_send(light_client_requests::sender::Request::Changes { request, sender }); RemoteResponse { receiver } } @@ -205,7 +216,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::Body { request, sender }); + .unbounded_send(light_client_requests::sender::Request::Body { request, sender 
}); RemoteResponse { receiver } } } diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index e69ad2b17e59c..28b913ea40192 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use fnv::FnvHashMap; use futures::prelude::*; @@ -304,8 +306,8 @@ impl NetworkBehaviour for PeerInfoBehaviour { handler, event: EitherOutput::First(event) }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), } } @@ -334,8 +336,8 @@ impl NetworkBehaviour for PeerInfoBehaviour { handler, event: EitherOutput::Second(event) }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index ac74af0f5ca94..4997bc36e53e7 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,43 +19,45 @@ use crate::{ ExHashT, chain::Client, - config::{BoxFinalityProofRequestBuilder, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, + config::{self, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, error, + request_responses::RequestFailure, utils::{interval, LruHashSet}, }; use bytes::{Bytes, BytesMut}; -use futures::{prelude::*, stream::FuturesUnordered}; +use codec::{Decode, DecodeAll, Encode}; +use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered}; use generic_proto::{GenericProto, GenericProtoOut}; -use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}}; -use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; +use libp2p::request_response::OutboundFailure; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; +use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; +use libp2p::{Multiaddr, PeerId}; +use log::{log, Level, trace, debug, warn, error}; +use message::{BlockAnnounce, Message}; +use message::generic::{Message as GenericMessage, Roles}; +use prometheus_endpoint::{ + Registry, Gauge, Counter, GaugeVec, + PrometheusError, Opts, register, U64 +}; +use prost::Message as _; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, import_queue::{BlockImportResult, BlockImportError, IncomingBlock, Origin} }; -use codec::{Decode, DecodeAll, Encode}; -use sp_runtime::{generic::BlockId, ConsensusEngineId, Justification}; +use sp_runtime::{generic::BlockId, Justification}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, Zero, CheckedSub }; use sp_arithmetic::traits::SaturatedConversion; -use message::{BlockAnnounce, Message}; -use message::generic::{Message as GenericMessage, Roles}; -use prometheus_endpoint::{ - Registry, Gauge, Counter, GaugeVec, - PrometheusError, Opts, register, U64 -}; use sync::{ChainSync, SyncState}; use std::borrow::Cow; +use std::convert::TryFrom as _; use std::collections::{HashMap, HashSet, VecDeque, hash_map::Entry}; use std::sync::Arc; -use std::fmt::Write; use std::{io, iter, num::NonZeroUsize, pin::Pin, task::Poll, time}; -use log::{log, Level, trace, debug, warn, error}; -use wasm_timer::Instant; mod generic_proto; @@ -63,22 +65,30 @@ pub mod message; pub mod event; pub mod sync; -pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError, LegacyConnectionKillError}; +pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError}; -const REQUEST_TIMEOUT_SEC: u64 = 40; /// Interval at which we perform time based maintenance const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); /// Interval at which we propagate transactions; const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); -/// Maximim number of known block hashes to keep for a peer. +/// Maximum number of known block hashes to keep for a peer. const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead -/// Maximim number of known transaction hashes to keep for a peer. +/// Maximum number of known transaction hashes to keep for a peer. /// /// This should be approx. 2 blocks full of transactions for the network to function properly. const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. 
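// The size limits introduced just below relate as follows (plain arithmetic, matching
// the "must be equal to max(...)" comment in the patch):
//
//     MAX_BLOCK_ANNOUNCE_SIZE                     = 1024 * 1024       =  1 MiB
//     MAX_TRANSACTIONS_SIZE                       = 16 * 1024 * 1024  = 16 MiB
//     BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE = 16 * 1024 * 1024  = 16 MiB
//                                                 = max(1 MiB, 16 MiB)
//
// i.e. the shared substream limit is sized for the larger of the two payloads.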
-/// Maximim number of transaction validation request we keep at any moment. +/// Maximum allowed size for a block announce. +const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; +/// Maximum allowed size for a transactions notification. +const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; + +/// Maximum size used for notifications in the block announce and transaction protocols. +// Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`. +pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = 16 * 1024 * 1024; + +/// Maximum number of transaction validation request we keep at any moment. const MAX_PENDING_TRANSACTIONS: usize = 8192; /// Current protocol version. @@ -86,6 +96,14 @@ pub(crate) const CURRENT_VERSION: u32 = 6; /// Lowest version we support pub(crate) const MIN_VERSION: u32 = 3; +/// Identifier of the peerset for the block announces protocol. +const HARDCODED_PEERSETS_SYNC: sc_peerset::SetId = sc_peerset::SetId::from(0); +/// Identifier of the peerset for the transactions protocol. +const HARDCODED_PEERSETS_TX: sc_peerset::SetId = sc_peerset::SetId::from(1); +/// Number of hardcoded peersets (the constants right above). Any set whose identifier is equal or +/// superior to this value corresponds to a user-defined protocol. +const NUM_HARDCODED_PEERSETS: usize = 2; + /// When light node connects to the full node and the full node is behind light node /// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful /// and disconnect to free connection slot. @@ -95,6 +113,8 @@ mod rep { use sc_peerset::ReputationChange as Rep; /// Reputation change when a peer doesn't respond in time to our messages. pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); + /// Reputation change when a peer refuses a request. + pub const REFUSED: Rep = Rep::new(-(1 << 10), "Request refused"); /// Reputation change when we are a light client and a peer is behind us. pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); /// Reputation change when a peer sends us any transaction. @@ -110,8 +130,6 @@ mod rep { pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction"); /// We received a message that failed to decode. pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); - /// We received an unexpected response. - pub const UNEXPECTED_RESPONSE: Rep = Rep::new_fatal("Unexpected response packet"); /// We received an unexpected transaction packet. pub const UNEXPECTED_TRANSACTIONS: Rep = Rep::new_fatal("Unexpected transactions packet"); /// Peer has different genesis. @@ -120,18 +138,14 @@ mod rep { pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); /// Peer role does not match (e.g. light peer connecting to another light peer). pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); - /// Peer response data does not have requested bits. - pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); /// Peer send us a block announcement that failed at validation. 
pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement"); } struct Metrics { - obsolete_requests: Gauge, peers: Gauge, queued_blocks: Gauge, fork_targets: Gauge, - finality_proofs: GaugeVec, justifications: GaugeVec, propagated_transactions: Counter, } @@ -139,10 +153,6 @@ struct Metrics { impl Metrics { fn register(r: &Registry) -> Result { Ok(Metrics { - obsolete_requests: { - let g = Gauge::new("sync_obsolete_requests", "Number of obsolete requests")?; - register(g, r)? - }, peers: { let g = Gauge::new("sync_peers", "Number of peers we sync with")?; register(g, r)? @@ -165,16 +175,6 @@ impl Metrics { )?; register(g, r)? }, - finality_proofs: { - let g = GaugeVec::new( - Opts::new( - "sync_extra_finality_proofs", - "Number of extra finality proof requests", - ), - &["status"], - )?; - register(g, r)? - }, propagated_transactions: register(Counter::new( "sync_propagated_transactions", "Number of transactions propagated to at least one peer", @@ -222,7 +222,9 @@ pub struct Protocol { config: ProtocolConfig, genesis_hash: B::Hash, sync: ChainSync, - context_data: ContextData, + // All connected peers + peers: HashMap>, + chain: Arc>, /// List of nodes for which we perform additional logging because they are important for the /// user. important_peers: HashSet, @@ -231,41 +233,29 @@ pub struct Protocol { transaction_pool: Arc>, /// Handles opening the unique substream and sending and receiving raw messages. behaviour: GenericProto, - /// For each legacy gossiping engine ID, the corresponding new protocol name. - protocol_name_by_engine: HashMap>, - /// For each protocol name, the legacy equivalent. - legacy_equiv_by_name: HashMap, Fallback>, - /// Name of the protocol used for transactions. - transactions_protocol: Cow<'static, str>, - /// Name of the protocol used for block announces. - block_announces_protocol: Cow<'static, str>, + /// List of notifications protocols that have been registered. + notification_protocols: Vec>, /// Prometheus metrics. metrics: Option, /// The `PeerId`'s of all boot nodes. - boot_node_ids: Arc>, + boot_node_ids: HashSet, + /// A cache for the data that was associated to a block announcement. + block_announce_data_cache: lru::LruCache>, } -#[derive(Default)] -struct PacketStats { - bytes_in: u64, - bytes_out: u64, - count_in: u64, - count_out: u64, -} /// Peer information -#[derive(Debug, Clone)] +#[derive(Debug)] struct Peer { info: PeerInfo, - /// Current block request, if any. - block_request: Option<(Instant, message::BlockRequest)>, - /// Requests we are no longer interested in. - obsolete_requests: HashMap, + /// Current block request, if any. Started by emitting [`CustomMessageOutcome::BlockRequest`]. + block_request: Option<( + message::BlockRequest, + oneshot::Receiver, RequestFailure>>, + )>, /// Holds a set of transactions known to this peer. known_transactions: LruHashSet, /// Holds a set of blocks known to this peer. known_blocks: LruHashSet, - /// Request counter, - next_request_id: message::RequestId, } /// Info about a peer's known state. @@ -279,14 +269,6 @@ pub struct PeerInfo { pub best_number: ::Number, } -/// Data necessary to create a context. -struct ContextData { - // All connected peers - peers: HashMap>, - stats: HashMap<&'static str, PacketStats>, - pub chain: Arc>, -} - /// Configuration for the Substrate-specific part of the networking layer. 
#[derive(Clone)] pub struct ProtocolConfig { @@ -319,80 +301,159 @@ struct BlockAnnouncesHandshake { } impl BlockAnnouncesHandshake { - fn build(protocol_config: &ProtocolConfig, chain: &Arc>) -> Self { - let info = chain.info(); + fn build( + protocol_config: &ProtocolConfig, + best_number: NumberFor, + best_hash: B::Hash, + genesis_hash: B::Hash, + ) -> Self { BlockAnnouncesHandshake { - genesis_hash: info.genesis_hash, + genesis_hash, roles: protocol_config.roles, - best_number: info.best_number, - best_hash: info.best_hash, + best_number, + best_hash, } } } /// Builds a SCALE-encoded "Status" message to send as handshake for the legacy protocol. -fn build_status_message(protocol_config: &ProtocolConfig, chain: &Arc>) -> Vec { - let info = chain.info(); +fn build_status_message( + protocol_config: &ProtocolConfig, + best_number: NumberFor, + best_hash: B::Hash, + genesis_hash: B::Hash, +) -> Vec { let status = message::generic::Status { version: CURRENT_VERSION, min_supported_version: MIN_VERSION, - genesis_hash: info.genesis_hash, + genesis_hash, roles: protocol_config.roles.into(), - best_number: info.best_number, - best_hash: info.best_hash, + best_number, + best_hash, chain_status: Vec::new(), // TODO: find a way to make this backwards-compatible }; Message::::Status(status).encode() } -/// Fallback mechanism to use to send a notification if no substream is open. -#[derive(Debug, Clone, PartialEq, Eq)] -enum Fallback { - /// Use a `Message::Consensus` with the given engine ID. - Consensus(ConsensusEngineId), - /// The message is the bytes encoding of a `Transactions` (which is itself defined as a `Vec`). - Transactions, - /// The message is the bytes encoding of a `BlockAnnounce`. - BlockAnnounce, -} - impl Protocol { /// Create a new instance. 
pub fn new( config: ProtocolConfig, - local_peer_id: PeerId, chain: Arc>, transaction_pool: Arc>, - finality_proof_request_builder: Option>, protocol_id: ProtocolId, - peerset_config: sc_peerset::PeersetConfig, + config_role: &config::Role, + network_config: &config::NetworkConfiguration, block_announce_validator: Box + Send>, metrics_registry: Option<&Registry>, - boot_node_ids: Arc>, - ) -> error::Result<(Protocol, sc_peerset::PeersetHandle)> { + ) -> error::Result<(Protocol, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); let sync = ChainSync::new( config.roles, chain.clone(), &info, - finality_proof_request_builder, block_announce_validator, config.max_parallel_downloads, ); + let boot_node_ids = { + let mut list = HashSet::new(); + for node in &network_config.boot_nodes { + list.insert(node.peer_id.clone()); + } + list.shrink_to_fit(); + list + }; + let important_peers = { let mut imp_p = HashSet::new(); - for reserved in peerset_config.priority_groups.iter().flat_map(|(_, l)| l.iter()) { - imp_p.insert(reserved.clone()); + for reserved in &network_config.default_peers_set.reserved_nodes { + imp_p.insert(reserved.peer_id.clone()); + } + for reserved in network_config.extra_sets.iter().flat_map(|s| s.set_config.reserved_nodes.iter()) { + imp_p.insert(reserved.peer_id.clone()); } imp_p.shrink_to_fit(); imp_p }; - let (peerset, peerset_handle) = sc_peerset::Peerset::from_config(peerset_config); + let mut known_addresses = Vec::new(); + + let (peerset, peerset_handle) = { + let mut sets = Vec::with_capacity(NUM_HARDCODED_PEERSETS + network_config.extra_sets.len()); + + let mut default_sets_reserved = HashSet::new(); + match config_role { + config::Role::Sentry { validators } => { + for validator in validators { + default_sets_reserved.insert(validator.peer_id.clone()); + known_addresses.push((validator.peer_id.clone(), validator.multiaddr.clone())); + } + } + config::Role::Authority { sentry_nodes } => { + for sentry_node in sentry_nodes { + default_sets_reserved.insert(sentry_node.peer_id.clone()); + known_addresses.push((sentry_node.peer_id.clone(), sentry_node.multiaddr.clone())); + } + } + _ => {} + }; + for reserved in network_config.default_peers_set.reserved_nodes.iter() { + default_sets_reserved.insert(reserved.peer_id.clone()); + known_addresses.push((reserved.peer_id.clone(), reserved.multiaddr.clone())); + } + + let mut bootnodes = Vec::with_capacity(network_config.boot_nodes.len()); + for bootnode in network_config.boot_nodes.iter() { + bootnodes.push(bootnode.peer_id.clone()); + known_addresses.push((bootnode.peer_id.clone(), bootnode.multiaddr.clone())); + } + + // Set number 0 is used for block announces. + sets.push(sc_peerset::SetConfig { + in_peers: network_config.default_peers_set.in_peers, + out_peers: network_config.default_peers_set.out_peers, + bootnodes, + reserved_nodes: default_sets_reserved.clone(), + reserved_only: network_config.default_peers_set.non_reserved_mode + == config::NonReservedPeerMode::Deny, + }); + + // Set number 1 is used for transactions. + // The `reserved_nodes` of this set are later kept in sync with the peers we connect + // to through set 0. 
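// For orientation (a reading aid, not part of the patch): with this layout the peerset
// indices used elsewhere in the patch line up as
//
//     set 0     -> HARDCODED_PEERSETS_SYNC  (block announces)
//     set 1     -> HARDCODED_PEERSETS_TX    (transactions)
//     set 2 + n -> n-th entry of `network_config.extra_sets`
//
// which is why e.g. `disconnect_peer` later in this file maps a user protocol name to
// `sc_peerset::SetId::from(position + NUM_HARDCODED_PEERSETS)`.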
+ sets.push(sc_peerset::SetConfig { + in_peers: 0, + out_peers: 0, + bootnodes: Vec::new(), + reserved_nodes: HashSet::new(), + reserved_only: true, + }); + + for set_cfg in &network_config.extra_sets { + let mut reserved_nodes = HashSet::new(); + for reserved in set_cfg.set_config.reserved_nodes.iter() { + reserved_nodes.insert(reserved.peer_id.clone()); + known_addresses.push((reserved.peer_id.clone(), reserved.multiaddr.clone())); + } + + let reserved_only = + set_cfg.set_config.non_reserved_mode == config::NonReservedPeerMode::Deny; - let mut legacy_equiv_by_name = HashMap::new(); + sets.push(sc_peerset::SetConfig { + in_peers: set_cfg.set_config.in_peers, + out_peers: set_cfg.set_config.out_peers, + bootnodes: Vec::new(), + reserved_nodes, + reserved_only, + }); + } + + sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { + sets, + }) + }; let transactions_protocol: Cow<'static, str> = Cow::from({ let mut proto = String::new(); @@ -401,7 +462,6 @@ impl Protocol { proto.push_str("/transactions/1"); proto }); - legacy_equiv_by_name.insert(transactions_protocol.clone(), Fallback::Transactions); let block_announces_protocol: Cow<'static, str> = Cow::from({ let mut proto = String::new(); @@ -410,24 +470,42 @@ impl Protocol { proto.push_str("/block-announces/1"); proto }); - legacy_equiv_by_name.insert(block_announces_protocol.clone(), Fallback::BlockAnnounce); let behaviour = { let versions = &((MIN_VERSION as u8)..=(CURRENT_VERSION as u8)).collect::>(); - let block_announces_handshake = BlockAnnouncesHandshake::build(&config, &chain).encode(); + let handshake_message = Roles::from(config_role).encode(); + + let best_number = info.best_number; + let best_hash = info.best_hash; + let genesis_hash = info.genesis_hash; + + let block_announces_handshake = BlockAnnouncesHandshake::::build( + &config, + best_number, + best_hash, + genesis_hash, + ).encode(); + GenericProto::new( - local_peer_id, protocol_id.clone(), versions, - build_status_message(&config, &chain), + build_status_message::(&config, best_number, best_hash, genesis_hash), peerset, - // As documented in `GenericProto`, the first protocol in the list is always the - // one carrying the handshake reported in the `CustomProtocolOpen` event. 
- iter::once((block_announces_protocol.clone(), block_announces_handshake)) - .chain(iter::once((transactions_protocol.clone(), vec![]))), + iter::once((block_announces_protocol, block_announces_handshake, MAX_BLOCK_ANNOUNCE_SIZE)) + .chain(iter::once((transactions_protocol, vec![], MAX_TRANSACTIONS_SIZE))) + .chain(network_config.extra_sets.iter().map(|s| ( + s.notifications_protocol.clone(), + handshake_message.clone(), + s.max_notification_size + ))), ) }; + let block_announce_data_cache = lru::LruCache::new( + network_config.default_peers_set.in_peers as usize + + network_config.default_peers_set.out_peers as usize, + ); + let protocol = Protocol { tick_timeout: Box::pin(interval(TICK_TIMEOUT)), propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), @@ -435,30 +513,26 @@ impl Protocol { pending_transactions: FuturesUnordered::new(), pending_transactions_peers: HashMap::new(), config, - context_data: ContextData { - peers: HashMap::new(), - stats: HashMap::new(), - chain, - }, + peers: HashMap::new(), + chain, genesis_hash: info.genesis_hash, sync, important_peers, transaction_pool, peerset_handle: peerset_handle.clone(), behaviour, - protocol_name_by_engine: HashMap::new(), - legacy_equiv_by_name, - transactions_protocol, - block_announces_protocol, + notification_protocols: + network_config.extra_sets.iter().map(|s| s.notifications_protocol.clone()).collect(), metrics: if let Some(r) = metrics_registry { Some(Metrics::register(r)?) } else { None }, boot_node_ids, + block_announce_data_cache, }; - Ok((protocol, peerset_handle)) + Ok((protocol, peerset_handle, known_addresses)) } /// Returns the list of all the peers we have an open channel to. @@ -466,14 +540,10 @@ impl Protocol { self.behaviour.open_peers() } - /// Returns true if we have a channel open with this node. - pub fn is_open(&self, peer_id: &PeerId) -> bool { - self.behaviour.is_open(peer_id) - } - - /// Returns the list of all the peers that the peerset currently requests us to be connected to. + /// Returns the list of all the peers that the peerset currently requests us to be connected + /// to on the default set. pub fn requested_peers(&self) -> impl Iterator { - self.behaviour.requested_peers() + self.behaviour.requested_peers(HARDCODED_PEERSETS_SYNC) } /// Returns the number of discovered nodes that we keep in memory. @@ -482,13 +552,12 @@ impl Protocol { } /// Disconnects the given peer if we are connected to it. - pub fn disconnect_peer(&mut self, peer_id: &PeerId) { - self.behaviour.disconnect_peer(peer_id) - } - - /// Returns true if we try to open protocols with the given peer. - pub fn is_enabled(&self, peer_id: &PeerId) -> bool { - self.behaviour.is_enabled(peer_id) + pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: &str) { + if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) { + self.behaviour.disconnect_peer(peer_id, sc_peerset::SetId::from(position + NUM_HARDCODED_PEERSETS)); + } else { + log::warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name") + } } /// Returns the state of the peerset manager, for debugging purposes. @@ -498,13 +567,12 @@ impl Protocol { /// Returns the number of peers we're connected to. pub fn num_connected_peers(&self) -> usize { - self.context_data.peers.values().count() + self.peers.values().count() } /// Returns the number of peers we're connected to and that are being queried. 
pub fn num_active_peers(&self) -> usize { - self.context_data - .peers + self.peers .values() .filter(|p| p.block_request.is_some()) .count() @@ -540,27 +608,29 @@ impl Protocol { self.sync.num_sync_requests() } - /// Sync local state with the blockchain state. - pub fn update_chain(&mut self) { - let info = self.context_data.chain.info(); - self.sync.update_chain_info(&info.best_hash, info.best_number); + /// Inform sync about new best imported block. + pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { + trace!(target: "sync", "New best block imported {:?}/#{}", hash, number); + + self.sync.update_chain_info(&hash, number); + self.behaviour.set_legacy_handshake_message( - build_status_message(&self.config, &self.context_data.chain), + build_status_message::(&self.config, number, hash, self.genesis_hash), ); self.behaviour.set_notif_protocol_handshake( - &self.block_announces_protocol, - BlockAnnouncesHandshake::build(&self.config, &self.context_data.chain).encode() + HARDCODED_PEERSETS_SYNC, + BlockAnnouncesHandshake::::build( + &self.config, + number, + hash, + self.genesis_hash, + ).encode() ); } - /// Inform sync about an own imported block. - pub fn own_block_imported(&mut self, hash: B::Hash, number: NumberFor) { - self.sync.update_chain_info(&hash, number); - } - fn update_peer_info(&mut self, who: &PeerId) { if let Some(info) = self.sync.peer_info(who) { - if let Some(ref mut peer) = self.context_data.peers.get_mut(who) { + if let Some(ref mut peer) = self.peers.get_mut(who) { peer.info.best_hash = info.best_hash; peer.info.best_number = info.best_number; } @@ -569,7 +639,7 @@ impl Protocol { /// Returns information about all the peers we are connected to after the handshake message. pub fn peers_info(&self) -> impl Iterator)> { - self.context_data.peers.iter().map(|(id, peer)| (id, &peer.info)) + self.peers.iter().map(|(id, peer)| (id, &peer.info)) } fn on_custom_message( @@ -585,17 +655,13 @@ impl Protocol { "Couldn't decode packet sent by {}: {:?}: {}", who, data, - err.what(), + err, ); self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); return CustomMessageOutcome::None; } }; - let mut stats = self.context_data.stats.entry(message.id()).or_default(); - stats.bytes_in += data.len() as u64; - stats.count_in += 1; - match message { GenericMessage::Status(_) => debug!(target: "sub-libp2p", "Received unexpected Status"), @@ -613,82 +679,50 @@ impl Protocol { warn!(target: "sub-libp2p", "Received unexpected RemoteHeaderResponse"), GenericMessage::RemoteChangesResponse(_) => warn!(target: "sub-libp2p", "Received unexpected RemoteChangesResponse"), - GenericMessage::FinalityProofResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected FinalityProofResponse"), GenericMessage::BlockRequest(_) | - GenericMessage::FinalityProofRequest(_) | GenericMessage::RemoteReadChildRequest(_) | GenericMessage::RemoteCallRequest(_) | GenericMessage::RemoteReadRequest(_) | GenericMessage::RemoteHeaderRequest(_) | - GenericMessage::RemoteChangesRequest(_) => { + GenericMessage::RemoteChangesRequest(_) | + GenericMessage::Consensus(_) | + GenericMessage::ConsensusBatch(_) => { debug!( target: "sub-libp2p", "Received no longer supported legacy request from {:?}", who ); - self.disconnect_peer(&who); + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(who, rep::BAD_PROTOCOL); }, - GenericMessage::Consensus(msg) => - return if self.protocol_name_by_engine.contains_key(&msg.engine_id) { - 
CustomMessageOutcome::NotificationsReceived { - remote: who, - messages: vec![(msg.engine_id, From::from(msg.data))], - } - } else { - debug!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); - CustomMessageOutcome::None - }, - GenericMessage::ConsensusBatch(messages) => { - let messages = messages - .into_iter() - .filter_map(|msg| { - if self.protocol_name_by_engine.contains_key(&msg.engine_id) { - Some((msg.engine_id, From::from(msg.data))) - } else { - debug!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); - None - } - }) - .collect::>(); - - return if !messages.is_empty() { - CustomMessageOutcome::NotificationsReceived { - remote: who, - messages, - } - } else { - CustomMessageOutcome::None - }; - }, } CustomMessageOutcome::None } - fn update_peer_request(&mut self, who: &PeerId, request: &mut message::BlockRequest) { - update_peer_request::(&mut self.context_data.peers, who, request) + fn prepare_block_request( + &mut self, + who: PeerId, + request: message::BlockRequest, + ) -> CustomMessageOutcome { + prepare_block_request::(&mut self.peers, who, request) } - /// Called by peer when it is disconnecting - pub fn on_peer_disconnected(&mut self, peer: PeerId) -> CustomMessageOutcome { + /// Called by peer when it is disconnecting. + /// + /// Returns a result if the handshake of this peer was indeed accepted. + pub fn on_sync_peer_disconnected(&mut self, peer: PeerId) -> Result<(), ()> { if self.important_peers.contains(&peer) { warn!(target: "sync", "Reserved peer {} disconnected", peer); } else { trace!(target: "sync", "{} disconnected", peer); } - if let Some(_peer_data) = self.context_data.peers.remove(&peer) { + if let Some(_peer_data) = self.peers.remove(&peer) { self.sync.peer_disconnected(&peer); - - // Notify all the notification protocols as closed. - CustomMessageOutcome::NotificationStreamClosed { - remote: peer, - protocols: self.protocol_name_by_engine.keys().cloned().collect(), - } + Ok(()) } else { - CustomMessageOutcome::None + Err(()) } } @@ -701,88 +735,94 @@ impl Protocol { /// Must contain the same `PeerId` and request that have been emitted. pub fn on_block_response( &mut self, - peer: PeerId, - response: message::BlockResponse, + peer_id: PeerId, + request: message::BlockRequest, + response: crate::schema::v1::BlockResponse, ) -> CustomMessageOutcome { - let request = if let Some(ref mut p) = self.context_data.peers.get_mut(&peer) { - if p.obsolete_requests.remove(&response.id).is_some() { - trace!(target: "sync", "Ignoring obsolete block response packet from {} ({})", peer, response.id); + let blocks = response.blocks.into_iter().map(|block_data| { + Ok(message::BlockData:: { + hash: Decode::decode(&mut block_data.hash.as_ref())?, + header: if !block_data.header.is_empty() { + Some(Decode::decode(&mut block_data.header.as_ref())?) + } else { + None + }, + body: if request.fields.contains(message::BlockAttributes::BODY) { + Some(block_data.body.iter().map(|body| { + Decode::decode(&mut body.as_ref()) + }).collect::, _>>()?) 
+ } else { + None + }, + receipt: if !block_data.message_queue.is_empty() { + Some(block_data.receipt) + } else { + None + }, + message_queue: if !block_data.message_queue.is_empty() { + Some(block_data.message_queue) + } else { + None + }, + justification: if !block_data.justification.is_empty() { + Some(block_data.justification) + } else if block_data.is_empty_justification { + Some(Vec::new()) + } else { + None + }, + }) + }).collect::, codec::Error>>(); + + let blocks = match blocks { + Ok(blocks) => blocks, + Err(err) => { + debug!(target: "sync", "Failed to decode block response from {}: {}", peer_id, err); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); return CustomMessageOutcome::None; } - // Clear the request. If the response is invalid peer will be disconnected anyway. - match p.block_request.take() { - Some((_, request)) if request.id == response.id => request, - Some(_) => { - trace!(target: "sync", "Ignoring obsolete block response packet from {} ({})", peer, response.id); - return CustomMessageOutcome::None; - } - None => { - trace!(target: "sync", "Unexpected response packet from unknown peer {}", peer); - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::UNEXPECTED_RESPONSE); - return CustomMessageOutcome::None; - } - } - } else { - trace!(target: "sync", "Unexpected response packet from unknown peer {}", peer); - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::UNEXPECTED_RESPONSE); - return CustomMessageOutcome::None; + }; + + let block_response = message::BlockResponse:: { + id: request.id, + blocks, }; let blocks_range = || match ( - response.blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), - response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), + block_response.blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), + block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), ) { (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), (Some(first), Some(_)) => format!(" ({})", first), _ => Default::default(), }; trace!(target: "sync", "BlockResponse {} from {} with {} blocks {}", - response.id, - peer, - response.blocks.len(), + block_response.id, + peer_id, + block_response.blocks.len(), blocks_range(), ); if request.fields == message::BlockAttributes::JUSTIFICATION { - match self.sync.on_block_justification(peer, response) { + match self.sync.on_block_justification(peer_id, block_response) { Ok(sync::OnBlockJustification::Nothing) => CustomMessageOutcome::None, Ok(sync::OnBlockJustification::Import { peer, hash, number, justification }) => CustomMessageOutcome::JustificationImport(peer, hash, number, justification), Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); CustomMessageOutcome::None } } } else { - // Validate fields against the request. 
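Editorial note: in the protobuf schema used by `on_block_response`, absent fields arrive as empty byte strings, so the conversion has to distinguish "not present" from "present but intentionally empty" (the `is_empty_justification` flag exists precisely for the justification case). The snippet below is a minimal, dependency-free sketch of that mapping; the struct and field names mirror the diff but are local stand-ins, not the generated schema types.

```rust
/// Local stand-in for the relevant fields of the generated block-data message.
#[derive(Default)]
struct RawBlockData {
    justification: Vec<u8>,
    is_empty_justification: bool,
}

/// Decides whether a justification was sent at all, and if so whether it is empty.
/// - non-empty bytes        -> Some(bytes)
/// - empty + explicit flag  -> Some(vec![])   (an intentionally empty justification)
/// - empty + no flag        -> None           (no justification sent)
fn decode_justification(raw: &RawBlockData) -> Option<Vec<u8>> {
    if !raw.justification.is_empty() {
        Some(raw.justification.clone())
    } else if raw.is_empty_justification {
        Some(Vec::new())
    } else {
        None
    }
}

fn main() {
    let sent = RawBlockData { justification: vec![1, 2], ..Default::default() };
    assert_eq!(decode_justification(&sent), Some(vec![1, 2]));

    let explicitly_empty = RawBlockData { is_empty_justification: true, ..Default::default() };
    assert_eq!(decode_justification(&explicitly_empty), Some(Vec::new()));

    assert_eq!(decode_justification(&RawBlockData::default()), None);
}
```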
- if request.fields.contains(message::BlockAttributes::HEADER) && response.blocks.iter().any(|b| b.header.is_none()) { - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); - trace!(target: "sync", "Missing header for a block"); - return CustomMessageOutcome::None - } - if request.fields.contains(message::BlockAttributes::BODY) && response.blocks.iter().any(|b| b.body.is_none()) { - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); - trace!(target: "sync", "Missing body for a block"); - return CustomMessageOutcome::None - } - - match self.sync.on_block_data(&peer, Some(request), response) { + match self.sync.on_block_data(&peer_id, Some(request), block_response) { Ok(sync::OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), - Ok(sync::OnBlockData::Request(peer, mut req)) => { - self.update_peer_request(&peer, &mut req); - CustomMessageOutcome::BlockRequest { - target: peer, - request: req, - } + Ok(sync::OnBlockData::Request(peer, req)) => { + self.prepare_block_request(peer, req) } Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); CustomMessageOutcome::None } @@ -790,65 +830,31 @@ impl Protocol { } } - /// Must be called in response to a [`CustomMessageOutcome::BlockRequest`] if it has failed. - pub fn on_block_request_failed( - &mut self, - peer: &PeerId, - ) { - self.peerset_handle.report_peer(peer.clone(), rep::TIMEOUT); - self.behaviour.disconnect_peer(peer); - } - /// Perform time based maintenance. /// /// > **Note**: This method normally doesn't have to be called except for testing purposes. pub fn tick(&mut self) { - self.maintain_peers(); self.report_metrics() } - fn maintain_peers(&mut self) { - let tick = Instant::now(); - let mut aborting = Vec::new(); - { - for (who, peer) in self.context_data.peers.iter() { - if peer.block_request.as_ref().map_or(false, |(t, _)| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) { - log!( - target: "sync", - if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, - "Request timeout {}", who - ); - aborting.push(who.clone()); - } else if peer.obsolete_requests.values().any(|t| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) { - log!( - target: "sync", - if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, - "Obsolete timeout {}", who - ); - aborting.push(who.clone()); - } - } - } - - for p in aborting { - self.behaviour.disconnect_peer(&p); - self.peerset_handle.report_peer(p, rep::TIMEOUT); - } - } - - /// Called on the first connection between two peers, after their exchange of handshake. - fn on_peer_connected( + /// Called on the first connection between two peers on the default set, after their exchange + /// of handshake. + /// + /// Returns `Ok` if the handshake is accepted and the peer added to the list of peers we sync + /// from. 
+ fn on_sync_peer_connected( &mut self, who: PeerId, status: BlockAnnouncesHandshake, - notifications_sink: NotificationsSink, - ) -> CustomMessageOutcome { + ) -> Result<(), ()> { trace!(target: "sync", "New peer {} {:?}", who, status); - if self.context_data.peers.contains_key(&who) { - debug!(target: "sync", "Ignoring duplicate status packet from {}", who); - return CustomMessageOutcome::None; + if self.peers.contains_key(&who) { + log::error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", who); + debug_assert!(false); + return Err(()); } + if status.genesis_hash != self.genesis_hash { log!( target: "sync", @@ -857,7 +863,7 @@ impl Protocol { self.genesis_hash, status.genesis_hash ); self.peerset_handle.report_peer(who.clone(), rep::GENESIS_MISMATCH); - self.behaviour.disconnect_peer(&who); + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); if self.boot_node_ids.contains(&who) { error!( @@ -869,7 +875,7 @@ impl Protocol { ); } - return CustomMessageOutcome::None; + return Err(()); } if self.config.roles.is_light() { @@ -877,13 +883,12 @@ impl Protocol { if status.roles.is_light() { debug!(target: "sync", "Peer {} is unable to serve light requests", who); self.peerset_handle.report_peer(who.clone(), rep::BAD_ROLE); - self.behaviour.disconnect_peer(&who); - return CustomMessageOutcome::None; + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); + return Err(()); } // we don't interested in peers that are far behind us let self_best_block = self - .context_data .chain .info() .best_number; @@ -894,8 +899,8 @@ impl Protocol { if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); self.peerset_handle.report_peer(who.clone(), rep::PEER_BEHIND_US_LIGHT); - self.behaviour.disconnect_peer(&who); - return CustomMessageOutcome::None; + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); + return Err(()); } } @@ -910,69 +915,32 @@ impl Protocol { .expect("Constant is nonzero")), known_blocks: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_BLOCKS) .expect("Constant is nonzero")), - next_request_id: 0, - obsolete_requests: HashMap::new(), }; - self.context_data.peers.insert(who.clone(), peer); - - debug!(target: "sync", "Connected {}", who); - let info = self.context_data.peers.get(&who).expect("We just inserted above; QED").info.clone(); - self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who.clone(), status.best_number)); - if info.roles.is_full() { - match self.sync.new_peer(who.clone(), info.best_hash, info.best_number) { - Ok(None) => (), - Ok(Some(mut req)) => { - self.update_peer_request(&who, &mut req); - self.pending_messages.push_back(CustomMessageOutcome::BlockRequest { - target: who.clone(), - request: req, - }); - }, + let req = if peer.info.roles.is_full() { + match self.sync.new_peer(who.clone(), peer.info.best_hash, peer.info.best_number) { + Ok(req) => req, Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); - self.peerset_handle.report_peer(id, repu) + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); + self.peerset_handle.report_peer(id, repu); + return Err(()) } } - } + } else { + None + }; - // Notify all the notification protocols as open. 
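Editorial note: the checks in `on_sync_peer_connected` form a short decision ladder: duplicate peers are rejected outright, a genesis-hash mismatch is fatal, and a light client additionally refuses other light peers and peers too far behind its own best block. A compact, self-contained sketch of that ladder follows; the rejection names and the threshold parameter are illustrative assumptions, not the crate's actual constants.

```rust
/// Reasons the handshake of a syncing peer can be rejected (names are illustrative).
#[derive(Debug, PartialEq)]
enum Reject {
    Duplicate,
    GenesisMismatch,
    LightPeerOnLightNode,
    PeerTooFarBehind,
}

struct HandshakeInfo {
    genesis_hash: [u8; 32],
    best_number: u64,
    is_light: bool,
}

/// Mirrors the order of checks in `on_sync_peer_connected`: duplicates, genesis hash,
/// then the light-client specific rules. `max_behind` stands in for
/// `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` (an assumed value here).
fn validate_handshake(
    already_connected: bool,
    local_genesis: [u8; 32],
    local_best: u64,
    local_is_light: bool,
    peer: &HandshakeInfo,
    max_behind: u64,
) -> Result<(), Reject> {
    if already_connected {
        return Err(Reject::Duplicate);
    }
    if peer.genesis_hash != local_genesis {
        return Err(Reject::GenesisMismatch);
    }
    if local_is_light {
        if peer.is_light {
            return Err(Reject::LightPeerOnLightNode);
        }
        if local_best.saturating_sub(peer.best_number) > max_behind {
            return Err(Reject::PeerTooFarBehind);
        }
    }
    Ok(())
}

fn main() {
    let genesis = [0u8; 32];
    let good = HandshakeInfo { genesis_hash: genesis, best_number: 8_000, is_light: false };
    assert_eq!(validate_handshake(false, genesis, 8_192, true, &good, 8_192), Ok(()));

    let stale = HandshakeInfo { genesis_hash: genesis, best_number: 0, is_light: false };
    assert_eq!(
        validate_handshake(false, genesis, 1_000_000, true, &stale, 8_192),
        Err(Reject::PeerTooFarBehind)
    );
}
```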
- CustomMessageOutcome::NotificationStreamOpened { - remote: who, - protocols: self.protocol_name_by_engine.keys().cloned().collect(), - roles: info.roles, - notifications_sink, - } - } + debug!(target: "sync", "Connected {}", who); - /// Registers a new notifications protocol. - /// - /// While registering a protocol while we already have open connections is discouraged, we - /// nonetheless handle it by notifying that we opened channels with everyone. This function - /// returns a list of substreams to open as a result. - pub fn register_notifications_protocol<'a>( - &'a mut self, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, - handshake_message: Vec, - ) -> impl Iterator + 'a { - let protocol_name = protocol_name.into(); - if self.protocol_name_by_engine.insert(engine_id, protocol_name.clone()).is_some() { - error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", protocol_name); - } else { - self.behaviour.register_notif_protocol(protocol_name.clone(), handshake_message); - self.legacy_equiv_by_name.insert(protocol_name, Fallback::Consensus(engine_id)); + self.peers.insert(who.clone(), peer); + self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who.clone(), status.best_number)); + + if let Some(req) = req { + let event = self.prepare_block_request(who.clone(), req); + self.pending_messages.push_back(event); } - let behaviour = &self.behaviour; - self.context_data.peers.iter().filter_map(move |(peer_id, peer)| { - if let Some(notifications_sink) = behaviour.notifications_sink(peer_id) { - Some((peer_id, peer.info.roles, notifications_sink)) - } else { - log::error!("State mismatch: no notifications sink for opened peer {:?}", peer_id); - None - } - }) + Ok(()) } /// Called when peer sends us new transactions @@ -984,7 +952,7 @@ impl Protocol { // sending transaction to light node is considered a bad behavior if !self.config.roles.is_full() { trace!(target: "sync", "Peer {} is trying to send transactions to the light node", who); - self.behaviour.disconnect_peer(&who); + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_TX); self.peerset_handle.report_peer(who, rep::UNEXPECTED_TRANSACTIONS); return; } @@ -996,7 +964,7 @@ impl Protocol { } trace!(target: "sync", "Received {} transactions from {}", transactions.len(), who); - if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { + if let Some(ref mut peer) = self.peers.get_mut(&who) { for t in transactions { if self.pending_transactions.len() > MAX_PENDING_TRANSACTIONS { debug!( @@ -1060,12 +1028,16 @@ impl Protocol { let mut propagated_to = HashMap::<_, Vec<_>>::new(); let mut propagated_transactions = 0; - for (who, peer) in self.context_data.peers.iter_mut() { + for (who, peer) in self.peers.iter_mut() { // never send transactions to the light node if !peer.info.roles.is_full() { continue; } + if !self.behaviour.is_open(who, HARDCODED_PEERSETS_TX) { + continue; + } + let (hashes, to_send): (Vec<_>, Vec<_>) = transactions .iter() .filter(|&(ref hash, _)| peer.known_transactions.insert(hash.clone())) @@ -1084,7 +1056,7 @@ impl Protocol { trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); self.behaviour.write_notification( who, - self.transactions_protocol.clone(), + HARDCODED_PEERSETS_TX, to_send.encode() ); } @@ -1113,8 +1085,8 @@ impl Protocol { /// /// In chain-based consensus, we often need to make sure non-best forks are /// at least temporarily synced. 
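Editorial note: transaction propagation above keeps a per-peer set of already-seen hashes and only forwards what the peer has not seen yet, additionally skipping light peers and peers whose transactions substream is not open. The sketch below reproduces that filtering with a plain `HashSet` per peer; the real code bounds the set with an `LruHashSet`, so treat the unbounded set here as a simplification.

```rust
use std::collections::{HashMap, HashSet};

struct PeerEntry {
    is_full: bool,
    tx_substream_open: bool,
    known_transactions: HashSet<u64>, // bounded by an LRU set in the real code
}

/// Returns, for each eligible peer, the transaction hashes that are new to it,
/// and marks them as known so they are not sent again.
fn select_for_propagation(
    peers: &mut HashMap<String, PeerEntry>,
    transactions: &[u64],
) -> HashMap<String, Vec<u64>> {
    let mut to_send = HashMap::new();
    for (who, peer) in peers.iter_mut() {
        // Never send transactions to light peers, and only use open substreams.
        if !peer.is_full || !peer.tx_substream_open {
            continue;
        }
        let fresh: Vec<u64> = transactions
            .iter()
            .copied()
            .filter(|hash| peer.known_transactions.insert(*hash))
            .collect();
        if !fresh.is_empty() {
            to_send.insert(who.clone(), fresh);
        }
    }
    to_send
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert("alice".to_string(), PeerEntry {
        is_full: true,
        tx_substream_open: true,
        known_transactions: [1u64].into_iter().collect(),
    });

    let out = select_for_propagation(&mut peers, &[1, 2, 3]);
    assert_eq!(out["alice"], vec![2, 3]);

    // A second round with the same transactions sends nothing new.
    assert!(select_for_propagation(&mut peers, &[1, 2, 3]).is_empty());
}
```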
- pub fn announce_block(&mut self, hash: B::Hash, data: Vec) { - let header = match self.context_data.chain.header(BlockId::Hash(hash)) { + pub fn announce_block(&mut self, hash: B::Hash, data: Option>) { + let header = match self.chain.header(BlockId::Hash(hash)) { Ok(Some(header)) => header, Ok(None) => { warn!("Trying to announce unknown block: {}", hash); @@ -1131,18 +1103,15 @@ impl Protocol { return; } - let is_best = self.context_data.chain.info().best_hash == hash; + let is_best = self.chain.info().best_hash == hash; debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best); - self.send_announcement(&header, data, is_best, true) - } - fn send_announcement(&mut self, header: &B::Header, data: Vec, is_best: bool, force: bool) { - let hash = header.hash(); + let data = data.or_else(|| self.block_announce_data_cache.get(&hash).cloned()).unwrap_or_default(); - for (who, ref mut peer) in self.context_data.peers.iter_mut() { - trace!(target: "sync", "Announcing block {:?} to {}", hash, who); + for (who, ref mut peer) in self.peers.iter_mut() { let inserted = peer.known_blocks.insert(hash); - if inserted || force { + if inserted { + trace!(target: "sync", "Announcing block {:?} to {}", hash, who); let message = message::BlockAnnounce { header: header.clone(), state: if is_best { @@ -1155,7 +1124,7 @@ impl Protocol { self.behaviour.write_notification( who, - self.block_announces_protocol.clone(), + HARDCODED_PEERSETS_SYNC, message.encode() ); } @@ -1182,16 +1151,25 @@ impl Protocol { ) { let hash = announce.header.hash(); - if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { - peer.known_blocks.insert(hash.clone()); - } + let peer = match self.peers.get_mut(&who) { + Some(p) => p, + None => { + log::error!(target: "sync", "Received block announce from disconnected peer {}", who); + debug_assert!(false); + return; + } + }; + + peer.known_blocks.insert(hash.clone()); let is_best = match announce.state.unwrap_or(message::BlockState::Best) { message::BlockState::Best => true, message::BlockState::Normal => false, }; - self.sync.push_block_announce_validation(who, hash, announce, is_best); + if peer.info.roles.is_full() { + self.sync.push_block_announce_validation(who, hash, announce, is_best); + } } /// Process the result of the block announce validation. @@ -1200,9 +1178,17 @@ impl Protocol { validation_result: sync::PollBlockAnnounceValidation, ) -> CustomMessageOutcome { let (header, is_best, who) = match validation_result { - sync::PollBlockAnnounceValidation::Nothing { is_best, who, header } => { + sync::PollBlockAnnounceValidation::Skip => + return CustomMessageOutcome::None, + sync::PollBlockAnnounceValidation::Nothing { is_best, who, announce } => { self.update_peer_info(&who); + if let Some(data) = announce.data { + if !data.is_empty() { + self.block_announce_data_cache.put(announce.header.hash(), data); + } + } + // `on_block_announce` returns `OnBlockAnnounce::ImportHeader` // when we have all data required to import the block // in the BlockAnnounce message. This is only when: @@ -1210,16 +1196,27 @@ impl Protocol { // AND // 2) parent block is already imported and not pruned. 
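Editorial note: the new `block_announce_data_cache` keeps the associated data of recently validated announcements so that re-announcing a block without explicit data can fall back to what was last seen, and it is sized to the number of default-set slots (`in_peers + out_peers`). Below is a tiny insertion-order bounded cache plus that fallback, as a self-contained illustration; the real field is an `lru::LruCache`, which evicts by recency rather than insertion order.

```rust
use std::collections::{HashMap, VecDeque};

/// Minimal bounded cache for illustration only (the crate uses `lru::LruCache`).
struct BoundedCache {
    capacity: usize,
    order: VecDeque<u64>,
    map: HashMap<u64, Vec<u8>>,
}

impl BoundedCache {
    fn new(capacity: usize) -> Self {
        Self { capacity, order: VecDeque::new(), map: HashMap::new() }
    }

    fn put(&mut self, hash: u64, data: Vec<u8>) {
        if self.map.insert(hash, data).is_none() {
            self.order.push_back(hash);
            if self.order.len() > self.capacity {
                if let Some(evicted) = self.order.pop_front() {
                    self.map.remove(&evicted);
                }
            }
        }
    }

    fn get(&self, hash: &u64) -> Option<&Vec<u8>> {
        self.map.get(hash)
    }
}

fn main() {
    // Sized like the real cache: default-set inbound plus outbound slots.
    let (in_peers, out_peers) = (25usize, 25usize);
    let mut cache = BoundedCache::new(in_peers + out_peers);
    cache.put(0xabc, vec![1, 2, 3]);

    // Re-announcing without explicit data falls back to the cached payload.
    let explicit: Option<Vec<u8>> = None;
    let data = explicit
        .or_else(|| cache.get(&0xabc).cloned())
        .unwrap_or_default();
    assert_eq!(data, vec![1, 2, 3]);
}
```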
if is_best { - return CustomMessageOutcome::PeerNewBest(who, *header.number()) + return CustomMessageOutcome::PeerNewBest(who, *announce.header.number()) } else { return CustomMessageOutcome::None } } - sync::PollBlockAnnounceValidation::ImportHeader { header, is_best, who } => { + sync::PollBlockAnnounceValidation::ImportHeader { announce, is_best, who } => { self.update_peer_info(&who); - (header, is_best, who) + + if let Some(data) = announce.data { + if !data.is_empty() { + self.block_announce_data_cache.put(announce.header.hash(), data); + } + } + + (announce.header, is_best, who) } - sync::PollBlockAnnounceValidation::Failure { who } => { + sync::PollBlockAnnounceValidation::Failure { who, disconnect } => { + if disconnect { + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); + } + self.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT); return CustomMessageOutcome::None } @@ -1257,15 +1254,11 @@ impl Protocol { Ok(sync::OnBlockData::Import(origin, blocks)) => { CustomMessageOutcome::BlockImport(origin, blocks) }, - Ok(sync::OnBlockData::Request(peer, mut req)) => { - self.update_peer_request(&peer, &mut req); - CustomMessageOutcome::BlockRequest { - target: peer, - request: req, - } + Ok(sync::OnBlockData::Request(peer, req)) => { + self.prepare_block_request(peer, req) } Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); CustomMessageOutcome::None } @@ -1302,18 +1295,6 @@ impl Protocol { count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)> ) { - let new_best = results.iter().rev().find_map(|r| match r { - (Ok(BlockImportResult::ImportedUnknown(n, aux, _)), hash) if aux.is_new_best => Some((*n, hash.clone())), - _ => None, - }); - if let Some((best_num, best_hash)) = new_best { - self.sync.update_chain_info(&best_hash, best_num); - self.behaviour.set_legacy_handshake_message(build_status_message(&self.config, &self.context_data.chain)); - self.behaviour.set_notif_protocol_handshake( - &self.block_announces_protocol, - BlockAnnouncesHandshake::build(&self.config, &self.context_data.chain).encode() - ); - } let results = self.sync.on_blocks_processed( imported, count, @@ -1321,15 +1302,13 @@ impl Protocol { ); for result in results { match result { - Ok((id, mut req)) => { - update_peer_request(&mut self.context_data.peers, &id, &mut req); - self.pending_messages.push_back(CustomMessageOutcome::BlockRequest { - target: id, - request: req, - }); + Ok((id, req)) => { + self.pending_messages.push_back( + prepare_block_request(&mut self.peers, id, req) + ); } Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu) } } @@ -1338,80 +1317,106 @@ impl Protocol { /// Call this when a justification has been processed by the import queue, with or without /// errors. 
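Editorial note: each result of the block-announce validation maps to exactly one outward action, as the match above shows: `Skip` produces nothing, `Nothing` at most updates the peer's best block, `ImportHeader` hands the header to the import pipeline, and `Failure` penalises the peer and optionally disconnects it. The enum-to-enum mapping below is a stripped-down model of that dispatch with local types; it omits the announce-data caching step shown in the diff.

```rust
/// Local stand-ins for the validation result and the resulting action.
enum Validation {
    Skip,
    Nothing { is_best: bool, number: u64 },
    ImportHeader { number: u64 },
    Failure { disconnect: bool },
}

#[derive(Debug, PartialEq)]
enum Action {
    None,
    PeerNewBest(u64),
    ImportHeader(u64),
    ReportAndMaybeDisconnect { disconnect: bool },
}

fn dispatch(result: Validation) -> Action {
    match result {
        Validation::Skip => Action::None,
        // A valid announce that needs no import still lets us learn the peer's new best block.
        Validation::Nothing { is_best: true, number } => Action::PeerNewBest(number),
        Validation::Nothing { is_best: false, .. } => Action::None,
        Validation::ImportHeader { number } => Action::ImportHeader(number),
        Validation::Failure { disconnect } => Action::ReportAndMaybeDisconnect { disconnect },
    }
}

fn main() {
    assert_eq!(
        dispatch(Validation::Nothing { is_best: true, number: 42 }),
        Action::PeerNewBest(42)
    );
    assert_eq!(
        dispatch(Validation::Failure { disconnect: true }),
        Action::ReportAndMaybeDisconnect { disconnect: true }
    );
}
```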
- pub fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - self.sync.on_justification_import(hash, number, success) + pub fn justification_import_result(&mut self, who: PeerId, hash: B::Hash, number: NumberFor, success: bool) { + self.sync.on_justification_import(hash, number, success); + if !success { + log::info!("💔 Invalid justification provided by {} for #{}", who, hash); + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); + self.peerset_handle.report_peer( + who, + sc_peerset::ReputationChange::new_fatal("Invalid justification") + ); + } } - /// Request a finality proof for the given block. - /// - /// Queues a new finality proof request and tries to dispatch all pending requests. - pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - self.sync.request_finality_proof(&hash, number) + /// Set whether the syncing peers set is in reserved-only mode. + pub fn set_reserved_only(&self, reserved_only: bool) { + self.peerset_handle.set_reserved_only(HARDCODED_PEERSETS_SYNC, reserved_only); + self.peerset_handle.set_reserved_only(HARDCODED_PEERSETS_TX, reserved_only); } - /// Notify the protocol that we have learned about the existence of nodes. - /// - /// Can be called multiple times with the same `PeerId`s. - pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { - self.behaviour.add_discovered_nodes(peer_ids) + /// Removes a `PeerId` from the list of reserved peers for syncing purposes. + pub fn remove_reserved_peer(&self, peer: PeerId) { + self.peerset_handle.remove_reserved_peer(HARDCODED_PEERSETS_SYNC, peer.clone()); + self.peerset_handle.remove_reserved_peer(HARDCODED_PEERSETS_TX, peer); } - pub fn finality_proof_import_result( - &mut self, - request_block: (B::Hash, NumberFor), - finalization_result: Result<(B::Hash, NumberFor), ()>, - ) { - self.sync.on_finality_proof_import(request_block, finalization_result) + /// Adds a `PeerId` to the list of reserved peers for syncing purposes. + pub fn add_reserved_peer(&self, peer: PeerId) { + self.peerset_handle.add_reserved_peer(HARDCODED_PEERSETS_SYNC, peer.clone()); + self.peerset_handle.add_reserved_peer(HARDCODED_PEERSETS_TX, peer); } - /// Must be called after a [`CustomMessageOutcome::FinalityProofRequest`] has been emitted, - /// to notify of the response having arrived. - pub fn on_finality_proof_response( - &mut self, - who: PeerId, - response: message::FinalityProofResponse, - ) -> CustomMessageOutcome { - trace!(target: "sync", "Finality proof response from {} for {}", who, response.block); - match self.sync.on_block_finality_proof(who, response) { - Ok(sync::OnBlockFinalityProof::Nothing) => CustomMessageOutcome::None, - Ok(sync::OnBlockFinalityProof::Import { peer, hash, number, proof }) => - CustomMessageOutcome::FinalityProofImport(peer, hash, number, proof), - Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); - self.peerset_handle.report_peer(id, repu); - CustomMessageOutcome::None - } + /// Sets the list of reserved peers for syncing purposes. + pub fn set_reserved_peers(&self, peers: HashSet) { + self.peerset_handle.set_reserved_peers(HARDCODED_PEERSETS_SYNC, peers.clone()); + self.peerset_handle.set_reserved_peers(HARDCODED_PEERSETS_TX, peers); + } + + /// Removes a `PeerId` from the list of reserved peers. 
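Editorial note: each reserved-peer operation on the default set above is applied twice, once to the hardcoded syncing set and once to the transactions set, so the two always see the same reserved peers. A minimal model of that mirroring is shown below, using a plain map keyed by set index in place of the peerset handle; the peer ids are illustrative strings.

```rust
use std::collections::{HashMap, HashSet};

const SYNC_SET: usize = 0;
const TX_SET: usize = 1;

/// Illustrative stand-in for the peerset handle: reserved peers per set.
#[derive(Default)]
struct Reserved {
    per_set: HashMap<usize, HashSet<&'static str>>,
}

impl Reserved {
    fn add_reserved_peer(&mut self, set: usize, peer: &'static str) {
        self.per_set.entry(set).or_default().insert(peer);
    }
    fn remove_reserved_peer(&mut self, set: usize, peer: &'static str) {
        self.per_set.entry(set).or_default().remove(peer);
    }
}

/// Mirrors the pattern in the diff: the default-set operation is forwarded to both
/// hardcoded sets so the transactions set tracks the syncing set.
fn add_default_reserved(handle: &mut Reserved, peer: &'static str) {
    handle.add_reserved_peer(SYNC_SET, peer);
    handle.add_reserved_peer(TX_SET, peer);
}

fn remove_default_reserved(handle: &mut Reserved, peer: &'static str) {
    handle.remove_reserved_peer(SYNC_SET, peer);
    handle.remove_reserved_peer(TX_SET, peer);
}

fn main() {
    let mut handle = Reserved::default();
    add_default_reserved(&mut handle, "alice");
    assert!(handle.per_set[&SYNC_SET].contains("alice"));
    assert!(handle.per_set[&TX_SET].contains("alice"));

    remove_default_reserved(&mut handle, "alice");
    assert!(!handle.per_set[&SYNC_SET].contains("alice"));
}
```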
+ pub fn remove_set_reserved_peer(&self, protocol: Cow<'static, str>, peer: PeerId) { + if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { + self.peerset_handle.remove_reserved_peer(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + } else { + log::error!( + target: "sub-libp2p", + "remove_set_reserved_peer with unknown protocol: {}", + protocol + ); } } - fn format_stats(&self) -> String { - let mut out = String::new(); - for (id, stats) in &self.context_data.stats { - let _ = writeln!( - &mut out, - "{}: In: {} bytes ({}), Out: {} bytes ({})", - id, - stats.bytes_in, - stats.count_in, - stats.bytes_out, - stats.count_out, + /// Adds a `PeerId` to the list of reserved peers. + pub fn add_set_reserved_peer(&self, protocol: Cow<'static, str>, peer: PeerId) { + if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { + self.peerset_handle.add_reserved_peer(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + } else { + log::error!( + target: "sub-libp2p", + "add_set_reserved_peer with unknown protocol: {}", + protocol ); } - out } - fn report_metrics(&self) { - use std::convert::TryInto; + /// Notify the protocol that we have learned about the existence of nodes on the default set. + /// + /// Can be called multiple times with the same `PeerId`s. + pub fn add_default_set_discovered_nodes(&mut self, peer_ids: impl Iterator) { + for peer_id in peer_ids { + self.peerset_handle.add_to_peers_set(HARDCODED_PEERSETS_SYNC, peer_id); + } + } - if let Some(metrics) = &self.metrics { - let mut obsolete_requests: u64 = 0; - for peer in self.context_data.peers.values() { - let n = peer.obsolete_requests.len().try_into().unwrap_or(std::u64::MAX); - obsolete_requests = obsolete_requests.saturating_add(n); - } - metrics.obsolete_requests.set(obsolete_requests); + /// Add a peer to a peers set. + pub fn add_to_peers_set(&self, protocol: Cow<'static, str>, peer: PeerId) { + if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { + self.peerset_handle.add_to_peers_set(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + } else { + log::error!( + target: "sub-libp2p", + "add_to_peers_set with unknown protocol: {}", + protocol + ); + } + } + + /// Remove a peer from a peers set. 
+ pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peer: PeerId) { + if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { + self.peerset_handle.remove_from_peers_set(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + } else { + log::error!( + target: "sub-libp2p", + "remove_from_peers_set with unknown protocol: {}", + protocol + ); + } + } - let n = self.context_data.peers.len().try_into().unwrap_or(std::u64::MAX); + fn report_metrics(&self) { + if let Some(metrics) = &self.metrics { + let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX); metrics.peers.set(n); let m = self.sync.metrics(); @@ -1427,77 +1432,79 @@ impl Protocol { .set(m.justifications.failed_requests.into()); metrics.justifications.with_label_values(&["importing"]) .set(m.justifications.importing_requests.into()); - - metrics.finality_proofs.with_label_values(&["pending"]) - .set(m.finality_proofs.pending_requests.into()); - metrics.finality_proofs.with_label_values(&["active"]) - .set(m.finality_proofs.active_requests.into()); - metrics.finality_proofs.with_label_values(&["failed"]) - .set(m.finality_proofs.failed_requests.into()); - metrics.finality_proofs.with_label_values(&["importing"]) - .set(m.finality_proofs.importing_requests.into()); } } } +fn prepare_block_request( + peers: &mut HashMap>, + who: PeerId, + request: message::BlockRequest, +) -> CustomMessageOutcome { + let (tx, rx) = oneshot::channel(); + + if let Some(ref mut peer) = peers.get_mut(&who) { + peer.block_request = Some((request.clone(), rx)); + } + + let request = crate::schema::v1::BlockRequest { + fields: request.fields.to_be_u32(), + from_block: match request.from { + message::FromBlock::Hash(h) => + Some(crate::schema::v1::block_request::FromBlock::Hash(h.encode())), + message::FromBlock::Number(n) => + Some(crate::schema::v1::block_request::FromBlock::Number(n.encode())), + }, + to_block: request.to.map(|h| h.encode()).unwrap_or_default(), + direction: request.direction as i32, + max_blocks: request.max.unwrap_or(0), + }; + + CustomMessageOutcome::BlockRequest { + target: who, + request: request, + pending_response: tx, + } +} + /// Outcome of an incoming custom message. #[derive(Debug)] #[must_use] pub enum CustomMessageOutcome { BlockImport(BlockOrigin, Vec>), JustificationImport(Origin, B::Hash, NumberFor, Justification), - FinalityProofImport(Origin, B::Hash, NumberFor, Vec), /// Notification protocols have been opened with a remote. NotificationStreamOpened { remote: PeerId, - protocols: Vec, + protocol: Cow<'static, str>, roles: Roles, notifications_sink: NotificationsSink }, /// The [`NotificationsSink`] of some notification protocols need an update. NotificationStreamReplaced { remote: PeerId, - protocols: Vec, + protocol: Cow<'static, str>, notifications_sink: NotificationsSink, }, /// Notification protocols have been closed with a remote. - NotificationStreamClosed { remote: PeerId, protocols: Vec }, + NotificationStreamClosed { remote: PeerId, protocol: Cow<'static, str> }, /// Messages have been received on one or more notifications protocols. - NotificationsReceived { remote: PeerId, messages: Vec<(ConsensusEngineId, Bytes)> }, + NotificationsReceived { remote: PeerId, messages: Vec<(Cow<'static, str>, Bytes)> }, /// A new block request must be emitted. - /// You must later call either [`Protocol::on_block_response`] or - /// [`Protocol::on_block_request_failed`]. - /// Each peer can only have one active request. 
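Editorial note: block requests no longer travel over the legacy substream; instead `prepare_block_request` creates a one-shot channel, keeps the receiving half next to the peer entry, and hands the sending half out through the `BlockRequest` event so whoever executes the request can deliver the response (or a failure) later. The sketch below models the same hand-off with `std::sync::mpsc` in place of `futures::channel::oneshot`, purely to keep the example dependency-free.

```rust
use std::collections::HashMap;
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};

type Response = Result<Vec<u8>, String>; // `String` stands in for `RequestFailure`

struct Peer {
    pending_block_request: Option<Receiver<Response>>,
}

/// Stores the receiving half with the peer and returns the sending half,
/// mirroring how `prepare_block_request` emits `pending_response` in the event.
fn prepare_block_request(
    peers: &mut HashMap<&'static str, Peer>,
    who: &'static str,
) -> Sender<Response> {
    let (tx, rx) = channel();
    if let Some(peer) = peers.get_mut(who) {
        peer.pending_block_request = Some(rx);
    }
    tx
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert("alice", Peer { pending_block_request: None });

    let pending_response = prepare_block_request(&mut peers, "alice");

    // Elsewhere, the component that executed the request reports its result.
    pending_response.send(Ok(vec![0xde, 0xad])).unwrap();

    // The poll loop later checks each peer's stored receiver for a finished request.
    let peer = peers.get_mut("alice").unwrap();
    if let Some(rx) = &peer.pending_block_request {
        match rx.try_recv() {
            Ok(Ok(bytes)) => println!("block response: {} bytes", bytes.len()),
            Ok(Err(failure)) => println!("request failed: {}", failure),
            Err(TryRecvError::Empty) => println!("still pending"),
            Err(TryRecvError::Disconnected) => println!("request was dropped"),
        }
    }
    peer.pending_block_request = None;
}
```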
If a request already exists for this peer, it - /// must be silently discarded. - /// It is the responsibility of the handler to ensure that a timeout exists. - BlockRequest { target: PeerId, request: message::BlockRequest }, - /// A new finality proof request must be emitted. - /// Once you have the response, you must call `Protocol::on_finality_proof_response`. - /// It is the responsibility of the handler to ensure that a timeout exists. - /// If the request times out, or the peer responds in an invalid way, the peer has to be - /// disconnect. This will inform the state machine that the request it has emitted is stale. - FinalityProofRequest { target: PeerId, block_hash: B::Hash, request: Vec }, + BlockRequest { + target: PeerId, + request: crate::schema::v1::BlockRequest, + pending_response: oneshot::Sender, RequestFailure>>, + }, /// Peer has a reported a new head of chain. PeerNewBest(PeerId, NumberFor), + /// Now connected to a new peer for syncing purposes. + SyncConnected(PeerId), + /// No longer connected to a peer for syncing purposes. + SyncDisconnected(PeerId), None, } -fn update_peer_request( - peers: &mut HashMap>, - who: &PeerId, - request: &mut message::BlockRequest, -) { - if let Some(ref mut peer) = peers.get_mut(who) { - request.id = peer.next_request_id; - peer.next_request_id += 1; - if let Some((timestamp, request)) = peer.block_request.take() { - trace!(target: "sync", "Request {} for {} is now obsolete.", request.id, who); - peer.obsolete_requests.insert(request.id, timestamp); - } - peer.block_request = Some((Instant::now(), request.clone())); - } -} - impl NetworkBehaviour for Protocol { type ProtocolsHandler = ::ProtocolsHandler; type OutEvent = CustomMessageOutcome; @@ -1549,6 +1556,80 @@ impl NetworkBehaviour for Protocol { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); } + // Check for finished outgoing requests. + let mut finished_block_requests = Vec::new(); + for (id, peer) in self.peers.iter_mut() { + if let Peer { block_request: Some((_, pending_response)), .. 
} = peer { + match pending_response.poll_unpin(cx) { + Poll::Ready(Ok(Ok(resp))) => { + let (req, _) = peer.block_request.take().unwrap(); + + let protobuf_response = match crate::schema::v1::BlockResponse::decode(&resp[..]) { + Ok(proto) => proto, + Err(e) => { + trace!(target: "sync", "Failed to decode block request to peer {:?}: {:?}.", id, e); + self.peerset_handle.report_peer(id.clone(), rep::BAD_MESSAGE); + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + continue; + } + }; + + finished_block_requests.push((id.clone(), req, protobuf_response)); + }, + Poll::Ready(Ok(Err(e))) => { + peer.block_request.take(); + trace!(target: "sync", "Block request to peer {:?} failed: {:?}.", id, e); + + match e { + RequestFailure::Network(OutboundFailure::Timeout) => { + self.peerset_handle.report_peer(id.clone(), rep::TIMEOUT); + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + } + RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { + self.peerset_handle.report_peer(id.clone(), rep::BAD_PROTOCOL); + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + } + RequestFailure::Network(OutboundFailure::DialFailure) => { + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + } + RequestFailure::Refused => { + self.peerset_handle.report_peer(id.clone(), rep::REFUSED); + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + } + RequestFailure::Network(OutboundFailure::ConnectionClosed) + | RequestFailure::NotConnected => { + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + }, + RequestFailure::UnknownProtocol => { + debug_assert!(false, "Block request protocol should always be known."); + } + RequestFailure::Obsolete => { + debug_assert!( + false, + "Can not receive `RequestFailure::Obsolete` after dropping the \ + response receiver.", + ); + } + } + }, + Poll::Ready(Err(oneshot::Canceled)) => { + peer.block_request.take(); + trace!( + target: "sync", + "Block request to peer {:?} failed due to oneshot being canceled.", + id, + ); + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + }, + Poll::Pending => {}, + } + } + } + for (id, req, protobuf_response) in finished_block_requests { + let ev = self.on_block_response(id, req, protobuf_response); + self.pending_messages.push_back(ev); + } + while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { self.tick(); } @@ -1557,28 +1638,12 @@ impl NetworkBehaviour for Protocol { self.propagate_transactions(); } - for (id, mut r) in self.sync.block_requests() { - update_peer_request(&mut self.context_data.peers, &id, &mut r); - let event = CustomMessageOutcome::BlockRequest { - target: id.clone(), - request: r, - }; - self.pending_messages.push_back(event); - } - for (id, mut r) in self.sync.justification_requests() { - update_peer_request(&mut self.context_data.peers, &id, &mut r); - let event = CustomMessageOutcome::BlockRequest { - target: id, - request: r, - }; + for (id, request) in self.sync.block_requests() { + let event = prepare_block_request(&mut self.peers, id.clone(), request); self.pending_messages.push_back(event); } - for (id, r) in self.sync.finality_proof_requests() { - let event = CustomMessageOutcome::FinalityProofRequest { - target: id, - block_hash: r.block, - request: r.request, - }; + for (id, request) in self.sync.justification_requests() { + let event = prepare_block_request(&mut self.peers, id, request); self.pending_messages.push_back(event); } if let Poll::Ready(Some((tx_hash, result))) = 
self.pending_transactions.poll_next_unpin(cx) { @@ -1610,86 +1675,146 @@ impl NetworkBehaviour for Protocol { return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), }; let outcome = match event { - GenericProtoOut::CustomProtocolOpen { peer_id, received_handshake, notifications_sink, .. } => { - // `received_handshake` can be either a `Status` message if received from the - // legacy substream ,or a `BlockAnnouncesHandshake` if received from the block - // announces substream. - match as DecodeAll>::decode_all(&mut &received_handshake[..]) { - Ok(GenericMessage::Status(handshake)) => { - let handshake = BlockAnnouncesHandshake { - roles: handshake.roles, - best_number: handshake.best_number, - best_hash: handshake.best_hash, - genesis_hash: handshake.genesis_hash, - }; - - self.on_peer_connected(peer_id, handshake, notifications_sink) - }, - Ok(msg) => { - debug!( - target: "sync", - "Expected Status message from {}, but got {:?}", - peer_id, - msg, - ); - self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); - CustomMessageOutcome::None - } - Err(err) => { - match as DecodeAll>::decode_all(&mut &received_handshake[..]) { - Ok(handshake) => { - self.on_peer_connected(peer_id, handshake, notifications_sink) - } - Err(err2) => { - debug!( - target: "sync", - "Couldn't decode handshake sent by {}: {:?}: {} & {}", - peer_id, - received_handshake, - err.what(), - err2, + GenericProtoOut::CustomProtocolOpen { peer_id, set_id, received_handshake, notifications_sink, .. } => { + // Set number 0 is hardcoded the default set of peers we sync from. + if set_id == HARDCODED_PEERSETS_SYNC { + // `received_handshake` can be either a `Status` message if received from the + // legacy substream ,or a `BlockAnnouncesHandshake` if received from the block + // announces substream. + match as DecodeAll>::decode_all(&mut &received_handshake[..]) { + Ok(GenericMessage::Status(handshake)) => { + let handshake = BlockAnnouncesHandshake { + roles: handshake.roles, + best_number: handshake.best_number, + best_hash: handshake.best_hash, + genesis_hash: handshake.genesis_hash, + }; + + if self.on_sync_peer_connected(peer_id.clone(), handshake).is_ok() { + // Set 1 is kept in sync with the connected peers of set 0. + self.peerset_handle.add_reserved_peer( + HARDCODED_PEERSETS_TX, + peer_id.clone() ); - self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + CustomMessageOutcome::SyncConnected(peer_id) + } else { CustomMessageOutcome::None } + }, + Ok(msg) => { + debug!( + target: "sync", + "Expected Status message from {}, but got {:?}", + peer_id, + msg, + ); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + CustomMessageOutcome::None + } + Err(err) => { + match as DecodeAll>::decode_all(&mut &received_handshake[..]) { + Ok(handshake) => { + if self.on_sync_peer_connected(peer_id.clone(), handshake).is_ok() { + // Set 1 is kept in sync with the connected peers of set 0. 
+ self.peerset_handle.add_reserved_peer( + HARDCODED_PEERSETS_TX, + peer_id.clone() + ); + CustomMessageOutcome::SyncConnected(peer_id) + } else { + CustomMessageOutcome::None + } + } + Err(err2) => { + debug!( + target: "sync", + "Couldn't decode handshake sent by {}: {:?}: {} & {}", + peer_id, + received_handshake, + err, + err2, + ); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + CustomMessageOutcome::None + } + } + } + } + + } else if set_id == HARDCODED_PEERSETS_TX { + // Nothing to do. + CustomMessageOutcome::None + } else { + match message::Roles::decode_all(&received_handshake[..]) { + Ok(roles) => + CustomMessageOutcome::NotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols[usize::from(set_id) - NUM_HARDCODED_PEERSETS].clone(), + roles, + notifications_sink, + }, + Err(err) => { + debug!(target: "sync", "Failed to parse remote handshake: {}", err); + self.behaviour.disconnect_peer(&peer_id, set_id); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + CustomMessageOutcome::None } } } } - GenericProtoOut::CustomProtocolReplaced { peer_id, notifications_sink, .. } => { - CustomMessageOutcome::NotificationStreamReplaced { - remote: peer_id, - protocols: self.protocol_name_by_engine.keys().cloned().collect(), - notifications_sink, + GenericProtoOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => { + if set_id == HARDCODED_PEERSETS_SYNC || set_id == HARDCODED_PEERSETS_TX { + CustomMessageOutcome::None + } else { + CustomMessageOutcome::NotificationStreamReplaced { + remote: peer_id, + protocol: self.notification_protocols[usize::from(set_id) - NUM_HARDCODED_PEERSETS].clone(), + notifications_sink, + } } }, - GenericProtoOut::CustomProtocolClosed { peer_id, .. } => { - self.on_peer_disconnected(peer_id) - }, - GenericProtoOut::LegacyMessage { peer_id, message } => - self.on_custom_message(peer_id, message), - GenericProtoOut::Notification { peer_id, protocol_name, message } => - match self.legacy_equiv_by_name.get(&protocol_name) { - Some(Fallback::Consensus(engine_id)) => { - CustomMessageOutcome::NotificationsReceived { - remote: peer_id, - messages: vec![(*engine_id, message.freeze())], - } - } - Some(Fallback::Transactions) => { - if let Ok(m) = message::Transactions::decode(&mut message.as_ref()) { - self.on_transactions(peer_id, m); - } else { - warn!(target: "sub-libp2p", "Failed to decode transactions list"); - } + GenericProtoOut::CustomProtocolClosed { peer_id, set_id } => { + // Set number 0 is hardcoded the default set of peers we sync from. + if set_id == HARDCODED_PEERSETS_SYNC { + if self.on_sync_peer_disconnected(peer_id.clone()).is_ok() { + // Set 1 is kept in sync with the connected peers of set 0. 
+ self.peerset_handle.remove_reserved_peer( + HARDCODED_PEERSETS_TX, + peer_id.clone() + ); + CustomMessageOutcome::SyncDisconnected(peer_id) + } else { + log::debug!( + target: "sync", + "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", + peer_id + ); CustomMessageOutcome::None } - Some(Fallback::BlockAnnounce) => { + } else if set_id == HARDCODED_PEERSETS_TX { + CustomMessageOutcome::None + } else { + CustomMessageOutcome::NotificationStreamClosed { + remote: peer_id, + protocol: self.notification_protocols[usize::from(set_id) - NUM_HARDCODED_PEERSETS].clone(), + } + } + }, + GenericProtoOut::LegacyMessage { peer_id, message } => { + if self.peers.contains_key(&peer_id) { + self.on_custom_message(peer_id, message) + } else { + CustomMessageOutcome::None + } + }, + GenericProtoOut::Notification { peer_id, set_id, message } => + match set_id { + HARDCODED_PEERSETS_SYNC if self.peers.contains_key(&peer_id) => { if let Ok(announce) = message::BlockAnnounce::decode(&mut message.as_ref()) { self.push_block_announce_validation(peer_id, announce); @@ -1705,18 +1830,47 @@ impl NetworkBehaviour for Protocol { CustomMessageOutcome::None } } - None => { - debug!(target: "sub-libp2p", "Received notification from unknown protocol {:?}", protocol_name); + HARDCODED_PEERSETS_TX if self.peers.contains_key(&peer_id) => { + if let Ok(m) = as Decode>::decode( + &mut message.as_ref(), + ) { + self.on_transactions(peer_id, m); + } else { + warn!(target: "sub-libp2p", "Failed to decode transactions list"); + } + CustomMessageOutcome::None + } + HARDCODED_PEERSETS_SYNC | HARDCODED_PEERSETS_TX => { + debug!( + target: "sync", + "Received sync or transaction for peer earlier refused by sync layer: {}", + peer_id + ); CustomMessageOutcome::None } + _ => { + let protocol_name = self.notification_protocols[usize::from(set_id) - NUM_HARDCODED_PEERSETS].clone(); + CustomMessageOutcome::NotificationsReceived { + remote: peer_id, + messages: vec![(protocol_name, message.freeze())], + } + } } }; - if let CustomMessageOutcome::None = outcome { - Poll::Pending - } else { - Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)) + if !matches!(outcome, CustomMessageOutcome::::None) { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)); + } + + if let Some(message) = self.pending_messages.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); } + + // This block can only be reached if an event was pulled from the behaviour and that + // resulted in `CustomMessageOutcome::None`. Since there might be another pending + // message from the behaviour, the task is scheduled again. + cx.waker().wake_by_ref(); + Poll::Pending } fn inject_addr_reach_failure( @@ -1752,9 +1906,3 @@ impl NetworkBehaviour for Protocol { self.behaviour.inject_listener_closed(id, reason); } } - -impl Drop for Protocol { - fn drop(&mut self) { - debug!(target: "sync", "Network stats:\n{}", self.format_stats()); - } -} diff --git a/client/network/src/protocol/event.rs b/client/network/src/protocol/event.rs index 637bf805b5024..e20dbcb9ee279 100644 --- a/client/network/src/protocol/event.rs +++ b/client/network/src/protocol/event.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Network event types. These are are not the part of the protocol, but rather //! events that happen on the network like DHT get/put results received. @@ -20,7 +22,7 @@ use bytes::Bytes; use libp2p::core::PeerId; use libp2p::kad::record::Key; -use sp_runtime::ConsensusEngineId; +use std::borrow::Cow; /// Events generated by DHT as a response to get_value and put_value requests. #[derive(Debug, Clone)] @@ -46,6 +48,18 @@ pub enum Event { /// Event generated by a DHT. Dht(DhtEvent), + /// Now connected to a new peer for syncing purposes. + SyncConnected { + /// Node we are now syncing from. + remote: PeerId, + }, + + /// Now disconnected from a peer for syncing purposes. + SyncDisconnected { + /// Node we are no longer syncing from. + remote: PeerId, + }, + /// Opened a substream with the given node with the given notifications protocol. /// /// The protocol is always one of the notification protocols that have been registered. @@ -53,7 +67,7 @@ pub enum Event { /// Node we opened the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, /// Role of the remote. role: ObservedRole, }, @@ -64,7 +78,7 @@ pub enum Event { /// Node we closed the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, }, /// Received one or more messages from the given node using the given protocol. @@ -72,7 +86,7 @@ pub enum Event { /// Node we received the message from. remote: PeerId, /// Concerned protocol and associated message. - messages: Vec<(ConsensusEngineId, Bytes)>, + messages: Vec<(Cow<'static, str>, Bytes)>, }, } diff --git a/client/network/src/protocol/generic_proto.rs b/client/network/src/protocol/generic_proto.rs index 3133471b0d249..a305fc1f5ea5f 100644 --- a/client/network/src/protocol/generic_proto.rs +++ b/client/network/src/protocol/generic_proto.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
-// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Implementation of libp2p's `NetworkBehaviour` trait that opens a single substream with the //! remote and then allows any communication with them. @@ -21,7 +23,7 @@ //! network, then performs the Substrate protocol handling on top. pub use self::behaviour::{GenericProto, GenericProtoOut}; -pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready, LegacyConnectionKillError}; +pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready}; mod behaviour; mod handler; diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 7b62b154016c3..cd77852c9107c 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::config::ProtocolId; use crate::protocol::generic_proto::{ @@ -42,45 +44,35 @@ use wasm_timer::Instant; /// Network behaviour that handles opening substreams for custom protocols with other peers. /// -/// ## Legacy vs new protocol -/// -/// The `GenericProto` behaves as following: -/// -/// - Whenever a connection is established, we open a single substream (called "legacy protocol" in -/// the source code) on that connection. This substream name depends on the `protocol_id` and -/// `versions` passed at initialization. If the remote refuses this substream, we close the -/// connection. -/// -/// - For each registered protocol, we also open an additional substream for this protocol. If the -/// remote refuses this substream, then it's fine. -/// -/// - Whenever we want to send a message, we can call either `send_packet` to force the legacy -/// substream, or `write_notification` to indicate a registered protocol. If the registered -/// protocol was refused or isn't supported by the remote, we always use the legacy instead. 
-/// -/// ## How it works +/// # How it works /// /// The role of the `GenericProto` is to synchronize the following components: /// /// - The libp2p swarm that opens new connections and reports disconnects. -/// - The connection handler (see `handler.rs`) that handles individual connections. +/// - The connection handler (see `group.rs`) that handles individual connections. /// - The peerset manager (PSM) that requests links to peers to be established or broken. /// - The external API, that requires knowledge of the links that have been established. /// -/// Each connection handler can be in four different states: Enabled+Open, Enabled+Closed, -/// Disabled+Open, or Disabled+Closed. The Enabled/Disabled component must be in sync with the -/// peerset manager. For example, if the peerset manager requires a disconnection, we disable the -/// connection handlers of that peer. The Open/Closed component must be in sync with the external -/// API. +/// In the state machine below, each `PeerId` is attributed one of these states: /// -/// However, a connection handler for a peer only exists if we are actually connected to that peer. -/// What this means is that there are six possible states for each peer: Disconnected, Dialing -/// (trying to connect), Enabled+Open, Enabled+Closed, Disabled+Open, Disabled+Closed. -/// Most notably, the Dialing state must correspond to a "link established" state in the peerset -/// manager. In other words, the peerset manager doesn't differentiate whether we are dialing a -/// peer or connected to it. +/// - [`PeerState::Requested`]: No open connection, but requested by the peerset. Currently dialing. +/// - [`PeerState::Disabled`]: Has open TCP connection(s) unbeknownst to the peerset. No substream +/// is open. +/// - [`PeerState::Enabled`]: Has open TCP connection(s), acknowledged by the peerset. +/// - Notifications substreams are open on at least one connection, and external +/// API has been notified. +/// - Notifications substreams aren't open. +/// - [`PeerState::Incoming`]: Has open TCP connection(s) and remote would like to open substreams. +/// Peerset has been asked to attribute an inbound slot. /// -/// There may be multiple connections to a peer. However, the status of a peer on +/// In addition to these states, there also exists a "banning" system. If we fail to dial a peer, +/// we back-off for a few seconds. If the PSM requests connecting to a peer that is currently +/// backed-off, the next dialing attempt is delayed until after the ban expires. However, the PSM +/// will still consider the peer to be connected. This "ban" is thus not a ban in a strict sense: +/// if a backed-off peer tries to connect, the connection is accepted. A ban only delays dialing +/// attempts. +/// +/// There may be multiple connections to a peer. The status of a peer on /// the API of this behaviour and towards the peerset manager is aggregated in /// the following way: /// @@ -94,9 +86,9 @@ use wasm_timer::Instant; /// in terms of potential reordering and dropped messages. Messages can /// be received on any connection. /// 3. The behaviour reports `GenericProtoOut::CustomProtocolOpen` when the -/// first connection reports `NotifsHandlerOut::Open`. +/// first connection reports `NotifsHandlerOut::OpenResultOk`. /// 4. The behaviour reports `GenericProtoOut::CustomProtocolClosed` when the -/// last connection reports `NotifsHandlerOut::Closed`. +/// last connection reports `NotifsHandlerOut::ClosedResult`. 
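Editorial note: the aggregation rule stated in the list above (announce "open" on the first substream that opens, "closed" only when the last one closes) can be captured with a per-peer set of open connection ids. Here is a dependency-free sketch of that rule; the connection identifiers and event names are local simplifications rather than the libp2p types.

```rust
use std::collections::{HashMap, HashSet};

#[derive(Debug, PartialEq)]
enum PeerEvent {
    ProtocolOpen,   // corresponds to `CustomProtocolOpen`
    ProtocolClosed, // corresponds to `CustomProtocolClosed`
}

#[derive(Default)]
struct OpenTracker {
    // Open connection ids per peer; the peer-level state is "open" while non-empty.
    open: HashMap<&'static str, HashSet<u32>>,
}

impl OpenTracker {
    /// A single connection finished opening its substream.
    fn on_connection_open(&mut self, peer: &'static str, conn: u32) -> Option<PeerEvent> {
        let set = self.open.entry(peer).or_default();
        let was_empty = set.is_empty();
        set.insert(conn);
        // Only the *first* open connection surfaces as a peer-level event.
        was_empty.then(|| PeerEvent::ProtocolOpen)
    }

    /// A single connection closed its substream.
    fn on_connection_closed(&mut self, peer: &'static str, conn: u32) -> Option<PeerEvent> {
        let set = self.open.entry(peer).or_default();
        let removed = set.remove(&conn);
        // Only when the *last* tracked connection goes away does the peer count as closed.
        (removed && set.is_empty()).then(|| PeerEvent::ProtocolClosed)
    }
}

fn main() {
    let mut tracker = OpenTracker::default();
    assert_eq!(tracker.on_connection_open("alice", 1), Some(PeerEvent::ProtocolOpen));
    assert_eq!(tracker.on_connection_open("alice", 2), None);
    assert_eq!(tracker.on_connection_closed("alice", 1), None);
    assert_eq!(tracker.on_connection_closed("alice", 2), Some(PeerEvent::ProtocolClosed));
}
```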
/// /// In this way, the number of actual established connections to the peer is /// an implementation detail of this behaviour. Note that, in practice and at @@ -104,29 +96,20 @@ use wasm_timer::Instant; /// and only as a result of simultaneous dialing. However, the implementation /// accommodates for any number of connections. /// -/// Additionally, there also exists a "banning" system. If we fail to dial a peer, we "ban" it for -/// a few seconds. If the PSM requests connecting to a peer that is currently "banned", the next -/// dialing attempt is delayed until after the ban expires. However, the PSM will still consider -/// the peer to be connected. This "ban" is thus not a ban in a strict sense: If a "banned" peer -/// tries to connect, the connection is accepted. A ban only delays dialing attempts. -/// pub struct GenericProto { - /// `PeerId` of the local node. - local_peer_id: PeerId, - /// Legacy protocol to open with peers. Never modified. legacy_protocol: RegisteredProtocol, /// Notification protocols. Entries are only ever added and not removed. /// Contains, for each protocol, the protocol name and the message to send as part of the /// initial handshake. - notif_protocols: Vec<(Cow<'static, str>, Arc>>)>, + notif_protocols: Vec<(Cow<'static, str>, Arc>>, u64)>, /// Receiver for instructions about who to connect to or disconnect from. peerset: sc_peerset::Peerset, /// List of peers in our state. - peers: FnvHashMap, + peers: FnvHashMap<(PeerId, sc_peerset::SetId), PeerState>, /// The elements in `peers` occasionally contain `Delay` objects that we would normally have /// to be polled one by one. In order to avoid doing so, as an optimization, every `Delay` is @@ -135,7 +118,7 @@ pub struct GenericProto { /// /// By design, we never remove elements from this list. Elements are removed only when the /// `Delay` triggers. As such, this stream may produce obsolete elements. - delays: stream::FuturesUnordered + Send>>>, + delays: stream::FuturesUnordered + Send>>>, /// [`DelayId`] to assign to the next delay. next_delay_id: DelayId, @@ -157,6 +140,8 @@ pub struct GenericProto { struct DelayId(u64); /// State of a peer we're connected to. +/// +/// The variants correspond to the state of the peer w.r.t. the peerset. #[derive(Debug)] enum PeerState { /// State is poisoned. This is a temporary state for a peer and we should always switch back @@ -166,9 +151,11 @@ enum PeerState { /// The peer misbehaved. If the PSM wants us to connect to this peer, we will add an artificial /// delay to the connection. - Banned { - /// Until when the peer is banned. - until: Instant, + Backoff { + /// When the ban expires. For clean-up purposes. References an entry in `delays`. + timer: DelayId, + /// Until when the peer is backed-off. + timer_deadline: Instant, }, /// The peerset requested that we connect to this peer. We are currently not connected. @@ -182,40 +169,54 @@ enum PeerState { /// The peerset requested that we connect to this peer. We are currently dialing this peer. Requested, - /// We are connected to this peer but the peerset refused it. + /// We are connected to this peer but the peerset hasn't requested it or has denied it. /// - /// We may still have ongoing traffic with that peer, but it should cease shortly. + /// The handler is either in the closed state, or a `Close` message has been sent to it and + /// hasn't been answered yet. Disabled { - /// The connections that are currently open for custom protocol traffic. 
- open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, - /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. - banned_until: Option, + /// If `Some`, any connection request from the peerset to this peer is delayed until the + /// given `Instant`. + backoff_until: Option, + + /// List of connections with this peer, and their state. + connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We are connected to this peer but we are not opening any Substrate substream. The handler - /// will be enabled when `timer` fires. This peer can still perform Kademlia queries and such, - /// but should get disconnected in a few seconds. + /// We are connected to this peer. The peerset has requested a connection to this peer, but + /// it is currently in a "backed-off" phase. The state will switch to `Enabled` once the timer + /// expires. + /// + /// The handler is either in the closed state, or a `Close` message has been sent to it and + /// hasn't been answered yet. + /// + /// The handler will be opened when `timer` fires. DisabledPendingEnable { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, /// When to enable this remote. References an entry in `delays`. timer: DelayId, /// When the `timer` will trigger. timer_deadline: Instant, + + /// List of connections with this peer, and their state. + connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We are connected to this peer and the peerset has accepted it. The handler is in the - /// enabled state. + /// We are connected to this peer and the peerset has accepted it. Enabled { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, + /// List of connections with this peer, and their state. + connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We received an incoming connection from this peer and forwarded that - /// connection request to the peerset. The connection handlers are waiting - /// for initialisation, i.e. to be enabled or disabled based on whether - /// the peerset accepts or rejects the peer. - Incoming, + /// We are connected to this peer. We have received an `OpenDesiredByRemote` from one of the + /// handlers and forwarded that request to the peerset. The connection handlers are waiting for + /// a response, i.e. to be opened or closed based on whether the peerset accepts or rejects + /// the peer. + Incoming { + /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. + backoff_until: Option, + + /// List of connections with this peer, and their state. + connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, + }, } impl PeerState { @@ -229,18 +230,19 @@ impl PeerState { /// that is open for custom protocol traffic. fn get_open(&self) -> Option<&NotificationsSink> { match self { - PeerState::Disabled { open, .. } | - PeerState::DisabledPendingEnable { open, .. } | - PeerState::Enabled { open, .. } => - if !open.is_empty() { - Some(&open[0].1) - } else { - None - } + PeerState::Enabled { connections, .. 
} => connections + .iter() + .filter_map(|(_, s)| match s { + ConnectionState::Open(s) => Some(s), + _ => None, + }) + .next(), PeerState::Poisoned => None, - PeerState::Banned { .. } => None, + PeerState::Backoff { .. } => None, PeerState::PendingRequest { .. } => None, PeerState::Requested => None, + PeerState::Disabled { .. } => None, + PeerState::DisabledPendingEnable { .. } => None, PeerState::Incoming { .. } => None, } } @@ -249,7 +251,7 @@ impl PeerState { fn is_requested(&self) -> bool { match self { PeerState::Poisoned => false, - PeerState::Banned { .. } => false, + PeerState::Backoff { .. } => false, PeerState::PendingRequest { .. } => true, PeerState::Requested => true, PeerState::Disabled { .. } => false, @@ -260,11 +262,44 @@ impl PeerState { } } +/// State of the handler of a single connection visible from this state machine. +#[derive(Debug)] +enum ConnectionState { + /// Connection is in the `Closed` state, meaning that the remote hasn't requested anything. + Closed, + + /// Connection is either in the `Open` or the `Closed` state, but a + /// [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be + /// acknowledged through a [`NotifsHandlerOut::CloseResult`]. + Closing, + + /// Connection is in the `Closed` state but a [`NotifsHandlerIn::Open`] message has been sent. + /// An `OpenResultOk`/`OpenResultErr` message is expected. + Opening, + + /// Connection is in the `Closed` state but a [`NotifsHandlerIn::Open`] message then a + /// [`NotifsHandlerIn::Close`] message has been sent. An `OpenResultOk`/`OpenResultErr` message + /// followed with a `CloseResult` message are expected. + OpeningThenClosing, + + /// Connection is in the `Closed` state, but a [`NotifsHandlerOut::OpenDesiredByRemote`] + /// message has been received, meaning that the remote wants to open a substream. + OpenDesiredByRemote, + + /// Connection is in the `Open` state. + /// + /// The external API is notified of a channel with this peer if any of its connection is in + /// this state. + Open(NotificationsSink), +} + /// State of an "incoming" message sent to the peer set manager. #[derive(Debug)] struct IncomingPeer { - /// Id of the remote peer of the incoming connection. + /// Id of the remote peer of the incoming substream. peer_id: PeerId, + /// Id of the set the incoming substream would belong to. + set_id: sc_peerset::SetId, /// If true, this "incoming" still corresponds to an actual connection. If false, then the /// connection corresponding to it has been closed or replaced already. alive: bool, @@ -279,6 +314,8 @@ pub enum GenericProtoOut { CustomProtocolOpen { /// Id of the peer we are connected to. peer_id: PeerId, + /// Peerset set ID the substream is tied to. + set_id: sc_peerset::SetId, /// Handshake that was sent to us. /// This is normally a "Status" message, but this is out of the concern of this code. received_handshake: Vec, @@ -294,6 +331,8 @@ pub enum GenericProtoOut { CustomProtocolReplaced { /// Id of the peer we are connected to. peer_id: PeerId, + /// Peerset set ID the substream is tied to. + set_id: sc_peerset::SetId, /// Replacement for the previous [`NotificationsSink`]. notifications_sink: NotificationsSink, }, @@ -303,8 +342,8 @@ pub enum GenericProtoOut { CustomProtocolClosed { /// Id of the peer we were connected to. peer_id: PeerId, - /// Reason why the substream closed, for debugging purposes. - reason: Cow<'static, str>, + /// Peerset set ID the substream was tied to. 
+ set_id: sc_peerset::SetId, }, /// Receives a message on the legacy substream. @@ -321,8 +360,8 @@ pub enum GenericProtoOut { Notification { /// Id of the peer the message came from. peer_id: PeerId, - /// Engine corresponding to the message. - protocol_name: Cow<'static, str>, + /// Peerset set ID the substream is tied to. + set_id: sc_peerset::SetId, /// Message that has been received. message: BytesMut, }, @@ -331,15 +370,14 @@ pub enum GenericProtoOut { impl GenericProto { /// Creates a `CustomProtos`. pub fn new( - local_peer_id: PeerId, protocol: impl Into, versions: &[u8], handshake_message: Vec, peerset: sc_peerset::Peerset, - notif_protocols: impl Iterator, Vec)>, + notif_protocols: impl Iterator, Vec, u64)>, ) -> Self { let notif_protocols = notif_protocols - .map(|(n, hs)| (n, Arc::new(RwLock::new(hs)))) + .map(|(n, hs, sz)| (n, Arc::new(RwLock::new(hs)), sz)) .collect::>(); assert!(!notif_protocols.is_empty()); @@ -348,7 +386,6 @@ impl GenericProto { let legacy_protocol = RegisteredProtocol::new(protocol, versions, legacy_handshake_message); GenericProto { - local_peer_id, legacy_protocol, notif_protocols, peerset, @@ -361,28 +398,17 @@ impl GenericProto { } } - /// Registers a new notifications protocol. - /// - /// You are very strongly encouraged to call this method very early on. Any open connection - /// will retain the protocols that were registered then, and not any new one. - pub fn register_notif_protocol( - &mut self, - protocol_name: impl Into>, - handshake_msg: impl Into> - ) { - self.notif_protocols.push((protocol_name.into(), Arc::new(RwLock::new(handshake_msg.into())))); - } - /// Modifies the handshake of the given notifications protocol. - /// - /// Has no effect if the protocol is unknown. pub fn set_notif_protocol_handshake( &mut self, - protocol_name: &str, + set_id: sc_peerset::SetId, handshake_message: impl Into> ) { - if let Some(protocol) = self.notif_protocols.iter_mut().find(|(name, _)| name == protocol_name) { - *protocol.1.write() = handshake_message.into(); + if let Some(p) = self.notif_protocols.get_mut(usize::from(set_id)) { + *p.1.write() = handshake_message.into(); + } else { + log::error!(target: "sub-libp2p", "Unknown handshake change set: {:?}", set_id); + debug_assert!(false); } } @@ -401,33 +427,29 @@ impl GenericProto { /// Returns the list of all the peers we have an open channel to. pub fn open_peers<'a>(&'a self) -> impl Iterator + 'a { - self.peers.iter().filter(|(_, state)| state.is_open()).map(|(id, _)| id) + self.peers.iter().filter(|(_, state)| state.is_open()).map(|((id, _), _)| id) } - /// Returns true if we have an open connection to the given peer. - pub fn is_open(&self, peer_id: &PeerId) -> bool { - self.peers.get(peer_id).map(|p| p.is_open()).unwrap_or(false) - } - - /// Returns the [`NotificationsSink`] that sends notifications to the given peer, or `None` - /// if the custom protocols aren't opened with this peer. - /// - /// If [`GenericProto::is_open`] returns `true` for this `PeerId`, then this method is - /// guaranteed to return `Some`. - pub fn notifications_sink(&self, peer_id: &PeerId) -> Option<&NotificationsSink> { - self.peers.get(peer_id).and_then(|p| p.get_open()) + /// Returns true if we have an open substream to the given peer. + pub fn is_open(&self, peer_id: &PeerId, set_id: sc_peerset::SetId) -> bool { + self.peers.get(&(peer_id.clone(), set_id)).map(|p| p.is_open()).unwrap_or(false) } /// Disconnects the given peer if we are connected to it. 
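// [Editor's illustrative sketch, not part of the patch.] The patch keys the
// `peers` map by `(PeerId, sc_peerset::SetId)` rather than by `PeerId` alone, so
// the same peer can be in different states on different notification protocols;
// `is_open` above is a lookup against that pair. Below, `String`, `usize` and
// `bool` stand in for `PeerId`, `SetId` and "substream open".

use std::collections::HashMap;

fn main() {
    let mut peers: HashMap<(String, usize), bool> = HashMap::new();

    // Peer "alice" has the substream of set 0 open, while set 1 is still closed.
    peers.insert(("alice".to_string(), 0), true);
    peers.insert(("alice".to_string(), 1), false);

    // Rough equivalent of `is_open(&peer_id, set_id)`: a lookup keyed by the pair.
    let is_open = |peers: &HashMap<(String, usize), bool>, peer: &str, set: usize| {
        peers.get(&(peer.to_string(), set)).copied().unwrap_or(false)
    };

    assert!(is_open(&peers, "alice", 0));
    assert!(!is_open(&peers, "alice", 1));
    assert!(!is_open(&peers, "bob", 0)); // unknown peers default to "not open"
}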
- pub fn disconnect_peer(&mut self, peer_id: &PeerId) { - debug!(target: "sub-libp2p", "External API => Disconnect {:?}", peer_id); - self.disconnect_peer_inner(peer_id, None); + pub fn disconnect_peer(&mut self, peer_id: &PeerId, set_id: sc_peerset::SetId) { + debug!(target: "sub-libp2p", "External API => Disconnect({}, {:?})", peer_id, set_id); + self.disconnect_peer_inner(peer_id, set_id, None); } /// Inner implementation of `disconnect_peer`. If `ban` is `Some`, we ban the peer /// for the specific duration. - fn disconnect_peer_inner(&mut self, peer_id: &PeerId, ban: Option) { - let mut entry = if let Entry::Occupied(entry) = self.peers.entry(peer_id.clone()) { + fn disconnect_peer_inner( + &mut self, + peer_id: &PeerId, + set_id: sc_peerset::SetId, + ban: Option + ) { + let mut entry = if let Entry::Occupied(entry) = self.peers.entry((peer_id.clone(), set_id)) { entry } else { return @@ -438,48 +460,82 @@ impl GenericProto { st @ PeerState::Disabled { .. } => *entry.into_mut() = st, st @ PeerState::Requested => *entry.into_mut() = st, st @ PeerState::PendingRequest { .. } => *entry.into_mut() = st, - st @ PeerState::Banned { .. } => *entry.into_mut() = st, + st @ PeerState::Backoff { .. } => *entry.into_mut() = st, // DisabledPendingEnable => Disabled. PeerState::DisabledPendingEnable { - open, + connections, timer_deadline, timer: _ } => { - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()); - let banned_until = Some(if let Some(ban) = ban { + debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); + let backoff_until = Some(if let Some(ban) = ban { cmp::max(timer_deadline, Instant::now() + ban) } else { timer_deadline }); *entry.into_mut() = PeerState::Disabled { - open, - banned_until + connections, + backoff_until } }, // Enabled => Disabled. - PeerState::Enabled { open } => { - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - let banned_until = ban.map(|dur| Instant::now() + dur); + // All open or opening connections are sent a `Close` message. + // If relevant, the external API is instantly notified. 
+ PeerState::Enabled { mut connections } => { + debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); + + if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { + debug!(target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + set_id, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::Open(_))) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Closing; + } + + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::Opening)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::OpeningThenClosing; + } + + debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_)))); + debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening))); + + let backoff_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { - open, - banned_until + connections, + backoff_until } }, // Incoming => Disabled. - PeerState::Incoming => { + // Ongoing opening requests from the remote are rejected. 
+ PeerState::Incoming { mut connections, backoff_until } => { let inc = if let Some(inc) = self.incoming.iter_mut() - .find(|i| i.peer_id == *entry.key() && i.alive) { + .find(|i| i.peer_id == entry.key().0 && i.set_id == set_id && i.alive) { inc } else { error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ @@ -488,16 +544,30 @@ impl GenericProto { }; inc.alive = false; - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - let banned_until = ban.map(|dur| Instant::now() + dur); + + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Closing; + } + + let backoff_until = match (backoff_until, ban) { + (Some(a), Some(b)) => Some(cmp::max(a, Instant::now() + b)), + (Some(a), None) => Some(a), + (None, Some(b)) => Some(Instant::now() + b), + (None, None) => None, + }; + + debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); *entry.into_mut() = PeerState::Disabled { - open: SmallVec::new(), - banned_until + connections, + backoff_until } }, @@ -507,42 +577,10 @@ impl GenericProto { } /// Returns the list of all the peers that the peerset currently requests us to be connected to. - pub fn requested_peers<'a>(&'a self) -> impl Iterator + 'a { - self.peers.iter().filter(|(_, state)| state.is_requested()).map(|(id, _)| id) - } - - /// Returns true if we try to open protocols with the given peer. - pub fn is_enabled(&self, peer_id: &PeerId) -> bool { - match self.peers.get(peer_id) { - None => false, - Some(PeerState::Disabled { .. }) => false, - Some(PeerState::DisabledPendingEnable { .. }) => false, - Some(PeerState::Enabled { .. }) => true, - Some(PeerState::Incoming { .. }) => false, - Some(PeerState::Requested) => false, - Some(PeerState::PendingRequest { .. }) => false, - Some(PeerState::Banned { .. }) => false, - Some(PeerState::Poisoned) => false, - } - } - - /// Notify the behaviour that we have learned about the existence of nodes. - /// - /// Can be called multiple times with the same `PeerId`s. - pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { - let local_peer_id = &self.local_peer_id; - self.peerset.discovered(peer_ids.filter_map(|peer_id| { - if peer_id == *local_peer_id { - error!( - target: "sub-libp2p", - "Discovered our own identity. This is a minor inconsequential bug." - ); - return None; - } - - debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id); - Some(peer_id) - })); + pub fn requested_peers<'a>(&'a self, set_id: sc_peerset::SetId) -> impl Iterator + 'a { + self.peers.iter() + .filter(move |((_, set), state)| *set == set_id && state.is_requested()) + .map(|((id, _), _)| id) } /// Sends a notification to a peer. 
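// [Editor's illustrative sketch, not part of the patch.] The arms of
// `disconnect_peer_inner` above merge an optional existing back-off deadline with
// an optional extra ban duration by keeping whichever deadline lies further in
// the future. `merge_backoff` is a hypothetical standalone helper showing that rule.

use std::cmp;
use std::time::{Duration, Instant};

/// Merge an existing back-off deadline with an optional additional ban,
/// returning the later of the two deadlines (or `None` if neither applies).
fn merge_backoff(existing: Option<Instant>, ban: Option<Duration>) -> Option<Instant> {
    match (existing, ban) {
        (Some(a), Some(b)) => Some(cmp::max(a, Instant::now() + b)),
        (Some(a), None) => Some(a),
        (None, Some(b)) => Some(Instant::now() + b),
        (None, None) => None,
    }
}

fn main() {
    let now = Instant::now();

    // A 10-second ban extends a back-off that would have expired in 2 seconds.
    let merged = merge_backoff(Some(now + Duration::from_secs(2)), Some(Duration::from_secs(10)));
    assert!(merged.unwrap() >= now + Duration::from_secs(10));

    // With no extra ban, the existing deadline is kept unchanged.
    assert_eq!(merge_backoff(Some(now), None), Some(now));

    // With neither, there is nothing to wait for.
    assert_eq!(merge_backoff(None, None), None);
}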
@@ -558,10 +596,10 @@ impl GenericProto { pub fn write_notification( &mut self, target: &PeerId, - protocol_name: Cow<'static, str>, + set_id: sc_peerset::SetId, message: impl Into>, ) { - let notifs_sink = match self.peers.get(target).and_then(|p| p.get_open()) { + let notifs_sink = match self.peers.get(&(target.clone(), set_id)).and_then(|p| p.get_open()) { None => { debug!(target: "sub-libp2p", "Tried to sent notification to {:?} without an open channel.", @@ -571,17 +609,18 @@ impl GenericProto { Some(sink) => sink }; + let message = message.into(); + trace!( target: "sub-libp2p", - "External API => Notification({:?}, {:?})", + "External API => Notification({:?}, {:?}, {} bytes)", target, - protocol_name, - ); - trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); - notifs_sink.send_sync_notification( - protocol_name, - message + set_id, + message.len(), ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); + + notifs_sink.send_sync_notification(message); } /// Returns the state of the peerset manager, for debugging purposes. @@ -590,15 +629,18 @@ impl GenericProto { } /// Function that is called when the peerset wants us to connect to a peer. - fn peerset_report_connect(&mut self, peer_id: PeerId) { - let mut occ_entry = match self.peers.entry(peer_id) { + fn peerset_report_connect(&mut self, peer_id: PeerId, set_id: sc_peerset::SetId) { + // If `PeerId` is unknown to us, insert an entry, start dialing, and return early. + let mut occ_entry = match self.peers.entry((peer_id.clone(), set_id)) { Entry::Occupied(entry) => entry, Entry::Vacant(entry) => { // If there's no entry in `self.peers`, start dialing. - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", entry.key()); - debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", entry.key()); + debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Starting to connect", + entry.key().0, set_id); + debug!(target: "sub-libp2p", "Libp2p <= Dial {}", entry.key().0); + // The `DialPeerCondition` ensures that dial attempts are de-duplicated self.events.push_back(NetworkBehaviourAction::DialPeer { - peer_id: entry.key().clone(), + peer_id: entry.key().0.clone(), condition: DialPeerCondition::Disconnected }); entry.insert(PeerState::Requested); @@ -609,175 +651,275 @@ impl GenericProto { let now = Instant::now(); match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) { - PeerState::Banned { ref until } if *until > now => { - let peer_id = occ_entry.key().clone(); - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \ - until {:?}", peer_id, until); - - let delay_id = self.next_delay_id; - self.next_delay_id.0 += 1; - let delay = futures_timer::Delay::new(*until - now); - self.delays.push(async move { - delay.await; - (delay_id, peer_id) - }.boxed()); - + // Backoff (not expired) => PendingRequest + PeerState::Backoff { ref timer, ref timer_deadline } if *timer_deadline > now => { + let peer_id = occ_entry.key().0.clone(); + debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Will start to connect at \ + until {:?}", peer_id, set_id, timer_deadline); *occ_entry.into_mut() = PeerState::PendingRequest { - timer: delay_id, - timer_deadline: *until, + timer: *timer, + timer_deadline: *timer_deadline, }; }, - PeerState::Banned { .. } => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key()); + // Backoff (expired) => Requested + PeerState::Backoff { .. 
} => { + debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Starting to connect", + occ_entry.key().0, set_id); debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); + // The `DialPeerCondition` ensures that dial attempts are de-duplicated self.events.push_back(NetworkBehaviourAction::DialPeer { - peer_id: occ_entry.key().clone(), + peer_id: occ_entry.key().0.clone(), condition: DialPeerCondition::Disconnected }); *occ_entry.into_mut() = PeerState::Requested; }, + // Disabled (with non-expired ban) => DisabledPendingEnable PeerState::Disabled { - open, - banned_until: Some(ref banned) - } if *banned > now => { - let peer_id = occ_entry.key().clone(); - debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is banned until {:?}", - peer_id, banned); + connections, + backoff_until: Some(ref backoff) + } if *backoff > now => { + let peer_id = occ_entry.key().0.clone(); + debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): But peer is backed-off until {:?}", + peer_id, set_id, backoff); let delay_id = self.next_delay_id; self.next_delay_id.0 += 1; - let delay = futures_timer::Delay::new(*banned - now); + let delay = futures_timer::Delay::new(*backoff - now); self.delays.push(async move { delay.await; - (delay_id, peer_id) + (delay_id, peer_id, set_id) }.boxed()); *occ_entry.into_mut() = PeerState::DisabledPendingEnable { - open, + connections, timer: delay_id, - timer_deadline: *banned, + timer_deadline: *backoff, }; }, - PeerState::Disabled { open, banned_until: _ } => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", - occ_entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: occ_entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *occ_entry.into_mut() = PeerState::Enabled { open }; + // Disabled => Enabled + PeerState::Disabled { mut connections, backoff_until } => { + debug_assert!(!connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::Open(_)) + })); + + // The first element of `closed` is chosen to open the notifications substream. + if let Some((connec_id, connec_state)) = connections.iter_mut() + .find(|(_, s)| matches!(s, ConnectionState::Closed)) + { + debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.", + occ_entry.key().0, set_id); + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Opening; + *occ_entry.into_mut() = PeerState::Enabled { connections }; + } else { + // If no connection is available, switch to `DisabledPendingEnable` in order + // to try again later. + debug_assert!(connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::OpeningThenClosing | ConnectionState::Closing) + })); + debug!( + target: "sub-libp2p", + "PSM => Connect({}, {:?}): No connection in proper state. 
Delaying.", + occ_entry.key().0, set_id + ); + + let timer_deadline = { + let base = now + Duration::from_secs(5); + if let Some(backoff_until) = backoff_until { + cmp::max(base, backoff_until) + } else { + base + } + }; + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + debug_assert!(timer_deadline > now); + let delay = futures_timer::Delay::new(timer_deadline - now); + self.delays.push(async move { + delay.await; + (delay_id, peer_id, set_id) + }.boxed()); + + *occ_entry.into_mut() = PeerState::DisabledPendingEnable { + connections, + timer: delay_id, + timer_deadline, + }; + } }, - PeerState::Incoming => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", - occ_entry.key()); + // Incoming => Enabled + PeerState::Incoming { mut connections, .. } => { + debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.", + occ_entry.key().0, set_id); if let Some(inc) = self.incoming.iter_mut() - .find(|i| i.peer_id == *occ_entry.key() && i.alive) { + .find(|i| i.peer_id == occ_entry.key().0 && i.set_id == set_id && i.alive) { inc.alive = false; } else { error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ incoming for incoming peer") } - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: occ_entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *occ_entry.into_mut() = PeerState::Enabled { open: SmallVec::new() }; + + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + occ_entry.key(), *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: occ_entry.key().0.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Opening; + } + + *occ_entry.into_mut() = PeerState::Enabled { connections }; }, + // Other states are kept as-is. st @ PeerState::Enabled { .. } => { warn!(target: "sub-libp2p", - "PSM => Connect({:?}): Already connected.", - occ_entry.key()); + "PSM => Connect({}, {:?}): Already connected.", + occ_entry.key().0, set_id); *occ_entry.into_mut() = st; + debug_assert!(false); }, st @ PeerState::DisabledPendingEnable { .. } => { warn!(target: "sub-libp2p", - "PSM => Connect({:?}): Already pending enabling.", - occ_entry.key()); + "PSM => Connect({}, {:?}): Already pending enabling.", + occ_entry.key().0, set_id); *occ_entry.into_mut() = st; + debug_assert!(false); }, st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => { warn!(target: "sub-libp2p", - "PSM => Connect({:?}): Duplicate request.", - occ_entry.key()); + "PSM => Connect({}, {:?}): Duplicate request.", + occ_entry.key().0, set_id); *occ_entry.into_mut() = st; + debug_assert!(false); }, - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()), + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()); + debug_assert!(false); + }, } } /// Function that is called when the peerset wants us to disconnect from a peer. 
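// [Editor's illustrative sketch, not part of the patch.] The behaviour drives all
// of its timers through the single `delays` stream instead of polling each peer's
// `Delay` individually, and entries are never removed, so a timer can fire for a
// peer whose state has changed in the meantime; the consumer must check whether
// the `(DelayId, PeerId, SetId)` it receives is still the one recorded in the
// peer's state and otherwise ignore it. Simplified stand-ins (`u64` ids, `String`
// peer ids) replace the real types; the sketch assumes the `futures` and
// `futures-timer` crates, both already used by this module.

use futures::{executor::block_on, future::FutureExt, stream::{FuturesUnordered, StreamExt}};
use std::time::Duration;

fn main() {
    let mut delays: FuturesUnordered<
        std::pin::Pin<Box<dyn std::future::Future<Output = (u64, String, usize)> + Send>>,
    > = FuturesUnordered::new();

    // Two timers for the same peer/set: the first is made obsolete by the second
    // (e.g. the back-off was extended), but both stay in the stream.
    for (delay_id, millis) in [(1u64, 10u64), (2, 20)] {
        let peer = "alice".to_string();
        delays.push(async move {
            futures_timer::Delay::new(Duration::from_millis(millis)).await;
            (delay_id, peer, 0usize)
        }.boxed());
    }

    // Only delay id 2 is considered current; earlier wake-ups are ignored.
    let current_delay_id = 2u64;

    block_on(async {
        while let Some((delay_id, peer, set)) = delays.next().await {
            if delay_id != current_delay_id {
                println!("ignoring obsolete timer {} for {}/{}", delay_id, peer, set);
                continue;
            }
            println!("timer {} fired for {}/{}: time to act", delay_id, peer, set);
        }
    });
}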
- fn peerset_report_disconnect(&mut self, peer_id: PeerId) { - let mut entry = match self.peers.entry(peer_id) { + fn peerset_report_disconnect(&mut self, peer_id: PeerId, set_id: sc_peerset::SetId) { + let mut entry = match self.peers.entry((peer_id, set_id)) { Entry::Occupied(entry) => entry, Entry::Vacant(entry) => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); + debug!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.", + entry.key().0, set_id); return } }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { - st @ PeerState::Disabled { .. } | st @ PeerState::Banned { .. } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); + st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. } => { + debug!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.", + entry.key().0, set_id); *entry.into_mut() = st; }, - PeerState::DisabledPendingEnable { - open, - timer_deadline, - timer: _ - } => { + // DisabledPendingEnable => Disabled + PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => { + debug_assert!(!connections.is_empty()); debug!(target: "sub-libp2p", - "PSM => Drop({:?}): Interrupting pending enabling.", - entry.key()); + "PSM => Drop({}, {:?}): Interrupting pending enabling.", + entry.key().0, set_id); *entry.into_mut() = PeerState::Disabled { - open, - banned_until: Some(timer_deadline), + connections, + backoff_until: Some(timer_deadline), }; }, - PeerState::Enabled { open } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until: None + // Enabled => Disabled + PeerState::Enabled { mut connections } => { + debug!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Disabling connections.", + entry.key().0, set_id); + + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + + if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { + debug!(target: "sub-libp2p", "External API <= Closed({}, {:?})", entry.key().0, set_id); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: entry.key().0.clone(), + set_id, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); } + + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::Opening)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", + entry.key(), *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: entry.key().0.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::OpeningThenClosing; + } + + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::Open(_))) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", + entry.key(), *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: entry.key().0.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close { protocol_index: set_id.into() }, + }); + *connec_state 
= ConnectionState::Closing; + } + + *entry.into_mut() = PeerState::Disabled { connections, backoff_until: None } }, - st @ PeerState::Incoming => { - error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", - entry.key()); - *entry.into_mut() = st; - }, + + // Requested => Ø PeerState::Requested => { // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other // sub-systems (such as the discovery mechanism) may require dialing this peer as // well at the same time. - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected.", entry.key()); + debug!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected.", + entry.key().0, set_id); entry.remove(); }, - PeerState::PendingRequest { timer_deadline, .. } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected", entry.key()); - *entry.into_mut() = PeerState::Banned { until: timer_deadline } + + // PendingRequest => Backoff + PeerState::PendingRequest { timer, timer_deadline } => { + debug!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected", + entry.key().0, set_id); + *entry.into_mut() = PeerState::Backoff { timer, timer_deadline } }, - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()), + // Invalid state transitions. + st @ PeerState::Incoming { .. } => { + error!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not enabled (Incoming).", + entry.key().0, set_id); + *entry.into_mut() = st; + debug_assert!(false); + }, + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()); + debug_assert!(false); + }, } } @@ -792,28 +934,58 @@ impl GenericProto { }; if !incoming.alive { - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming, - sending back dropped", index, incoming.peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); - self.peerset.dropped(incoming.peer_id); + debug!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Obsolete incoming", + index, incoming.peer_id, incoming.set_id); + match self.peers.get_mut(&(incoming.peer_id.clone(), incoming.set_id)) { + Some(PeerState::DisabledPendingEnable { .. }) | + Some(PeerState::Enabled { .. }) => {} + _ => { + debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", + incoming.peer_id, incoming.set_id); + self.peerset.dropped(incoming.set_id, incoming.peer_id, sc_peerset::DropReason::Unknown); + }, + } return } - match self.peers.get_mut(&incoming.peer_id) { - Some(state @ PeerState::Incoming) => { - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", - index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: incoming.peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *state = PeerState::Enabled { open: SmallVec::new() }; + let state = match self.peers.get_mut(&(incoming.peer_id.clone(), incoming.set_id)) { + Some(s) => s, + None => { + debug_assert!(false); + return; + } + }; + + match mem::replace(state, PeerState::Poisoned) { + // Incoming => Enabled + PeerState::Incoming { mut connections, .. 
} => { + debug!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Enabling connections.", + index, incoming.peer_id, incoming.set_id); + + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + incoming.peer_id, *connec_id, incoming.set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open { protocol_index: incoming.set_id.into() }, + }); + *connec_state = ConnectionState::Opening; + } + + *state = PeerState::Enabled { connections }; + } + + // Any state other than `Incoming` is invalid. + peer => { + error!(target: "sub-libp2p", + "State mismatch in libp2p: Expected alive incoming. Got {:?}.", + peer); + debug_assert!(false); } - peer => error!(target: "sub-libp2p", - "State mismatch in libp2p: Expected alive incoming. Got {:?}.", - peer) } } @@ -827,25 +999,40 @@ impl GenericProto { }; if !incoming.alive { - debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Obsolete incoming, \ - ignoring", index, incoming.peer_id); + debug!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Obsolete incoming, \ + ignoring", index, incoming.peer_id, incoming.set_id); return } - match self.peers.get_mut(&incoming.peer_id) { - Some(state @ PeerState::Incoming) => { - debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", - index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: incoming.peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - *state = PeerState::Disabled { - open: SmallVec::new(), - banned_until: None - }; + let state = match self.peers.get_mut(&(incoming.peer_id.clone(), incoming.set_id)) { + Some(s) => s, + None => { + debug_assert!(false); + return; + } + }; + + match mem::replace(state, PeerState::Poisoned) { + // Incoming => Disabled + PeerState::Incoming { mut connections, backoff_until } => { + debug!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Rejecting connections.", + index, incoming.peer_id, incoming.set_id); + + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", + incoming.peer_id, connec_id, incoming.set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close { protocol_index: incoming.set_id.into() }, + }); + *connec_state = ConnectionState::Closing; + } + + *state = PeerState::Disabled { connections, backoff_until }; } peer => error!(target: "sub-libp2p", "State mismatch in libp2p: Expected alive incoming. 
Got {:?}.", @@ -873,250 +1060,379 @@ impl NetworkBehaviour for GenericProto { } fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} established.", - conn, endpoint, peer_id); - match (self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned), endpoint) { - (st @ &mut PeerState::Requested, endpoint) | - (st @ &mut PeerState::PendingRequest { .. }, endpoint) => { - debug!(target: "sub-libp2p", - "Libp2p => Connected({}, {:?}): Connection was requested by PSM.", - peer_id, endpoint - ); - *st = PeerState::Enabled { open: SmallVec::new() }; - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Enable - }); - } - - // Note: it may seem weird that "Banned" peers get treated as if they were absent. - // This is because the word "Banned" means "temporarily prevent outgoing connections to - // this peer", and not "banned" in the sense that we would refuse the peer altogether. - (st @ &mut PeerState::Poisoned, endpoint @ ConnectedPoint::Listener { .. }) | - (st @ &mut PeerState::Banned { .. }, endpoint @ ConnectedPoint::Listener { .. }) => { - let incoming_id = self.next_incoming_index; - self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { - Some(v) => v, - None => { - error!(target: "sub-libp2p", "Overflow in next_incoming_index"); - return - } - }; - debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Incoming connection", - peer_id, endpoint); - debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", - peer_id, incoming_id); - self.peerset.incoming(peer_id.clone(), incoming_id); - self.incoming.push(IncomingPeer { - peer_id: peer_id.clone(), - alive: true, - incoming_id, - }); - *st = PeerState::Incoming { }; - } + for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { + match self.peers.entry((peer_id.clone(), set_id)).or_insert(PeerState::Poisoned) { + // Requested | PendingRequest => Enabled + st @ &mut PeerState::Requested | + st @ &mut PeerState::PendingRequest { .. } => { + debug!(target: "sub-libp2p", + "Libp2p => Connected({}, {:?}, {:?}): Connection was requested by PSM.", + peer_id, set_id, endpoint + ); + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, *conn, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, + }); - (st @ &mut PeerState::Poisoned, endpoint) | - (st @ &mut PeerState::Banned { .. }, endpoint) => { - let banned_until = if let PeerState::Banned { until } = st { - Some(*until) - } else { - None - }; - debug!(target: "sub-libp2p", - "Libp2p => Connected({},{:?}): Not requested by PSM, disabling.", - peer_id, endpoint); - *st = PeerState::Disabled { open: SmallVec::new(), banned_until }; - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Disable - }); - } + let mut connections = SmallVec::new(); + connections.push((*conn, ConnectionState::Opening)); + *st = PeerState::Enabled { connections }; + } - (PeerState::Incoming { .. 
}, _) => { - debug!(target: "sub-libp2p", - "Secondary connection {:?} to {} waiting for PSM decision.", - conn, peer_id); - }, + // Poisoned gets inserted above if the entry was missing. + // Ø | Backoff => Disabled + st @ &mut PeerState::Poisoned | + st @ &mut PeerState::Backoff { .. } => { + let backoff_until = if let PeerState::Backoff { timer_deadline, .. } = st { + Some(*timer_deadline) + } else { + None + }; + debug!(target: "sub-libp2p", + "Libp2p => Connected({}, {:?}, {:?}, {:?}): Not requested by PSM, disabling.", + peer_id, set_id, endpoint, *conn); - (PeerState::Enabled { .. }, _) => { - debug!(target: "sub-libp2p", "Handler({},{:?}) <= Enable secondary connection", - peer_id, conn); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Enable - }); - } + let mut connections = SmallVec::new(); + connections.push((*conn, ConnectionState::Closed)); + *st = PeerState::Disabled { connections, backoff_until }; + } - (PeerState::Disabled { .. }, _) | (PeerState::DisabledPendingEnable { .. }, _) => { - debug!(target: "sub-libp2p", "Handler({},{:?}) <= Disable secondary connection", - peer_id, conn); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Disable - }); + // In all other states, add this new connection to the list of closed inactive + // connections. + PeerState::Incoming { connections, .. } | + PeerState::Disabled { connections, .. } | + PeerState::DisabledPendingEnable { connections, .. } | + PeerState::Enabled { connections, .. } => { + debug!(target: "sub-libp2p", + "Libp2p => Connected({}, {:?}, {:?}, {:?}): Secondary connection. Leaving closed.", + peer_id, set_id, endpoint, *conn); + connections.push((*conn, ConnectionState::Closed)); + } } } } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} closed.", - conn, endpoint, peer_id); - match self.peers.get_mut(peer_id) { - Some(PeerState::Disabled { open, .. }) | - Some(PeerState::DisabledPendingEnable { open, .. }) | - Some(PeerState::Enabled { open, .. }) => { - // Check if the "link" to the peer is already considered closed, - // i.e. there is no connection that is open for custom protocols, - // in which case `CustomProtocolClosed` was already emitted. 
- let closed = open.is_empty(); - let sink_closed = open.get(0).map_or(false, |(c, _)| c == conn); - open.retain(|(c, _)| c != conn); - if !closed { - if let Some((_, sink)) = open.get(0) { - if sink_closed { - let event = GenericProtoOut::CustomProtocolReplaced { - peer_id: peer_id.clone(), - notifications_sink: sink.clone(), - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - } else { - debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); - let event = GenericProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - reason: "Disconnected by libp2p".into(), - }; + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, _endpoint: &ConnectedPoint) { + for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { + let mut entry = if let Entry::Occupied(entry) = self.peers.entry((peer_id.clone(), set_id)) { + entry + } else { + error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler"); + debug_assert!(false); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // Disabled => Disabled | Backoff | Ø + PeerState::Disabled { mut connections, backoff_until } => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): Disabled.", + peer_id, set_id, *conn); - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); + } else { + debug_assert!(false); + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); } - } - } - _ => {} - } - } - fn inject_disconnected(&mut self, peer_id: &PeerId) { - match self.peers.remove(peer_id) { - None | Some(PeerState::Requested) | Some(PeerState::PendingRequest { .. }) | - Some(PeerState::Banned { .. }) => - // This is a serious bug either in this state machine or in libp2p. - error!(target: "sub-libp2p", - "`inject_disconnected` called for unknown peer {}", - peer_id), + if connections.is_empty() { + if let Some(until) = backoff_until { + let now = Instant::now(); + if until > now { + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(until - now); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id, set_id) + }.boxed()); + + *entry.get_mut() = PeerState::Backoff { + timer: delay_id, + timer_deadline: until, + }; + } else { + entry.remove(); + } + } else { + entry.remove(); + } + } else { + *entry.get_mut() = PeerState::Disabled { connections, backoff_until }; + } + }, - Some(PeerState::Disabled { open, banned_until, .. }) => { - if !open.is_empty() { - debug_assert!(false); - error!( + // DisabledPendingEnable => DisabledPendingEnable | Backoff + PeerState::DisabledPendingEnable { mut connections, timer_deadline, timer } => { + debug!( target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id + "Libp2p => Disconnected({}, {:?}, {:?}): Disabled but pending enable.", + peer_id, set_id, *conn ); - } - debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was disabled.", peer_id); - if let Some(until) = banned_until { - self.peers.insert(peer_id.clone(), PeerState::Banned { until }); - } - } - Some(PeerState::DisabledPendingEnable { open, timer_deadline, .. 
}) => { - if !open.is_empty() { - debug_assert!(false); - error!( + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); + } else { + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + if connections.is_empty() { + debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); + *entry.get_mut() = PeerState::Backoff { timer, timer_deadline }; + + } else { + *entry.get_mut() = PeerState::DisabledPendingEnable { + connections, timer_deadline, timer + }; + } + }, + + // Incoming => Incoming | Disabled | Backoff | Ø + PeerState::Incoming { mut connections, backoff_until } => { + debug!( target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id + "Libp2p => Disconnected({}, {:?}, {:?}): OpenDesiredByRemote.", + peer_id, set_id, *conn ); + + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); + } else { + debug_assert!(false); + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); + } + + let no_desired_left = !connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::OpenDesiredByRemote) + }); + + // If no connection is `OpenDesiredByRemote` anymore, clean up the peerset incoming + // request. + if no_desired_left { + // In the incoming state, we don't report "Dropped". Instead we will just + // ignore the corresponding Accept/Reject. + if let Some(state) = self.incoming.iter_mut() + .find(|i| i.alive && i.set_id == set_id && i.peer_id == *peer_id) + { + state.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ + incoming corresponding to an incoming state in peers"); + debug_assert!(false); + } + } + + if connections.is_empty() { + if let Some(until) = backoff_until { + let now = Instant::now(); + if until > now { + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(until - now); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id, set_id) + }.boxed()); + + *entry.get_mut() = PeerState::Backoff { + timer: delay_id, + timer_deadline: until, + }; + } else { + entry.remove(); + } + } else { + entry.remove(); + } + + } else if no_desired_left { + // If no connection is `OpenDesiredByRemote` anymore, switch to `Disabled`. + *entry.get_mut() = PeerState::Disabled { connections, backoff_until }; + } else { + *entry.get_mut() = PeerState::Incoming { connections, backoff_until }; + } } - debug!(target: "sub-libp2p", - "Libp2p => Disconnected({}): Was disabled but pending enable.", - peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); - self.peerset.dropped(peer_id.clone()); - self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer_deadline }); - } - Some(PeerState::Enabled { open, .. }) => { - if !open.is_empty() { - debug_assert!(false); - error!( + // Enabled => Enabled | Backoff + // Peers are always backed-off when disconnecting while Enabled. 
+ PeerState::Enabled { mut connections } => { + debug!( target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id + "Libp2p => Disconnected({}, {:?}, {:?}): Enabled.", + peer_id, set_id, *conn ); - } - debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was enabled.", peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); - self.peerset.dropped(peer_id.clone()); - let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); - self.peers.insert(peer_id.clone(), PeerState::Banned { - until: Instant::now() + Duration::from_secs(ban_dur) - }); - } - // In the incoming state, we don't report "Dropped". Instead we will just ignore the - // corresponding Accept/Reject. - Some(PeerState::Incoming { }) => { - if let Some(state) = self.incoming.iter_mut() - .find(|i| i.alive && i.peer_id == *peer_id) - { - debug!(target: "sub-libp2p", - "Libp2p => Disconnected({}): Was in incoming mode with id {:?}.", - peer_id, state.incoming_id); - state.alive = false; - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ - corresponding to an incoming state in peers") + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + let (_, state) = connections.remove(pos); + if let ConnectionState::Open(_) = state { + if let Some((replacement_pos, replacement_sink)) = connections + .iter() + .enumerate() + .filter_map(|(num, (_, s))| { + match s { + ConnectionState::Open(s) => Some((num, s.clone())), + _ => None + } + }) + .next() + { + if pos <= replacement_pos { + debug!( + target: "sub-libp2p", + "External API <= Sink replaced({}, {:?})", + peer_id, set_id + ); + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: peer_id.clone(), + set_id, + notifications_sink: replacement_sink, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + } else { + debug!( + target: "sub-libp2p", "External API <= Closed({}, {:?})", + peer_id, set_id + ); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + set_id, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + } + + } else { + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + if connections.is_empty() { + debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); + let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(Duration::from_secs(ban_dur)); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id, set_id) + }.boxed()); + + *entry.get_mut() = PeerState::Backoff { + timer: delay_id, + timer_deadline: Instant::now() + Duration::from_secs(ban_dur), + }; + + } else if !connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_))) + { + debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); + + *entry.get_mut() = PeerState::Disabled { + connections, + backoff_until: None + }; + + } else { + *entry.get_mut() = PeerState::Enabled { connections }; + } } 
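// [Editor's illustrative sketch, not part of the patch.] The `Enabled` arm just
// above decides what to tell the external API when one connection of an enabled
// peer closes: if the closed connection carried the sink currently exposed to the
// external API, either switch to the sink of another still-open connection
// ("replaced") or, if none is left, report the substream as closed; if it was a
// secondary connection, nothing is announced. `Sink`, `ExternalUpdate` and
// `on_connection_closed` are hypothetical stand-ins for that decision.

/// Stand-in label for `NotificationsSink`.
type Sink = &'static str;

#[derive(Debug, PartialEq)]
enum ExternalUpdate {
    /// Another connection is still open; the external API switches to its sink.
    Replaced(Sink),
    /// No open connection remains; the external API is told the substream closed.
    Closed,
    /// The closed connection was not the reported one; nothing to announce.
    Nothing,
}

/// `remaining` lists the still-alive connections in order, `Some(sink)` for open
/// ones. `closed_was_reported` says whether the connection that just closed was
/// the one currently exposed to the external API.
fn on_connection_closed(remaining: &[Option<Sink>], closed_was_reported: bool) -> ExternalUpdate {
    if !closed_was_reported {
        return ExternalUpdate::Nothing;
    }
    match remaining.iter().find_map(|c| *c) {
        Some(sink) => ExternalUpdate::Replaced(sink),
        None => ExternalUpdate::Closed,
    }
}

fn main() {
    // The reported connection closed, but a second open connection exists.
    assert_eq!(
        on_connection_closed(&[None, Some("conn #2")], true),
        ExternalUpdate::Replaced("conn #2"),
    );

    // The reported connection closed and nothing else is open.
    assert_eq!(on_connection_closed(&[None], true), ExternalUpdate::Closed);

    // A secondary connection closed; the reported sink is untouched.
    assert_eq!(on_connection_closed(&[Some("conn #1")], false), ExternalUpdate::Nothing);
}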
- } - Some(PeerState::Poisoned) => - error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id), + PeerState::Requested | + PeerState::PendingRequest { .. } | + PeerState::Backoff { .. } => { + // This is a serious bug either in this state machine or in libp2p. + error!(target: "sub-libp2p", + "`inject_connection_closed` called for unknown peer {}", + peer_id); + debug_assert!(false); + }, + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id); + debug_assert!(false); + }, + } } } + fn inject_disconnected(&mut self, _peer_id: &PeerId) { + } + fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn error::Error) { trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); } fn inject_dial_failure(&mut self, peer_id: &PeerId) { - if let Entry::Occupied(mut entry) = self.peers.entry(peer_id.clone()) { - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - // The peer is not in our list. - st @ PeerState::Banned { .. } => { - trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = st; - }, + debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + + for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { + if let Entry::Occupied(mut entry) = self.peers.entry((peer_id.clone(), set_id)) { + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // The peer is not in our list. + st @ PeerState::Backoff { .. } => { + *entry.into_mut() = st; + }, - // "Basic" situation: we failed to reach a peer that the peerset requested. - PeerState::Requested | PeerState::PendingRequest { .. } => { - debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = PeerState::Banned { - until: Instant::now() + Duration::from_secs(5) - }; - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()) - }, + // "Basic" situation: we failed to reach a peer that the peerset requested. + st @ PeerState::Requested | + st @ PeerState::PendingRequest { .. } => { + debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); + + let now = Instant::now(); + let ban_duration = match st { + PeerState::PendingRequest { timer_deadline, .. } if timer_deadline > now => + cmp::max(timer_deadline - now, Duration::from_secs(5)), + _ => Duration::from_secs(5) + }; - // We can still get dial failures even if we are already connected to the peer, - // as an extra diagnostic for an earlier attempt. - st @ PeerState::Disabled { .. } | st @ PeerState::Enabled { .. } | - st @ PeerState::DisabledPendingEnable { .. } | st @ PeerState::Incoming { .. } => { - debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = st; - }, + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(ban_duration); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id, set_id) + }.boxed()); + + *entry.into_mut() = PeerState::Backoff { + timer: delay_id, + timer_deadline: now + ban_duration, + }; + }, - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), - } + // We can still get dial failures even if we are already connected to the peer, + // as an extra diagnostic for an earlier attempt. 
+ st @ PeerState::Disabled { .. } | st @ PeerState::Enabled { .. } | + st @ PeerState::DisabledPendingEnable { .. } | st @ PeerState::Incoming { .. } => { + *entry.into_mut() = st; + }, - } else { - // The peer is not in our list. - trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id); + debug_assert!(false); + }, + } + } } } @@ -1127,123 +1443,268 @@ impl NetworkBehaviour for GenericProto { event: NotifsHandlerOut, ) { match event { - NotifsHandlerOut::Closed { endpoint, reason } => { + NotifsHandlerOut::OpenDesiredByRemote { protocol_index } => { + let set_id = sc_peerset::SetId::from(protocol_index); + debug!(target: "sub-libp2p", - "Handler({:?}) => Endpoint {:?} closed for custom protocols: {}", - source, endpoint, reason); + "Handler({:?}, {:?}]) => OpenDesiredByRemote({:?})", + source, connection, set_id); - let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { + let mut entry = if let Entry::Occupied(entry) = self.peers.entry((source.clone(), set_id)) { entry } else { - error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); + error!(target: "sub-libp2p", "OpenDesiredByRemote: State mismatch in the custom protos handler"); + debug_assert!(false); return }; - let (last, new_notifications_sink) = match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Enabled { mut open } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // Incoming => Incoming + PeerState::Incoming { mut connections, backoff_until } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::OpenDesiredByRemote))); + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { + if let ConnectionState::Closed = *connec_state { + *connec_state = ConnectionState::OpenDesiredByRemote; + } else { + // Connections in `OpeningThenClosing` state are in a Closed phase, + // and as such can emit `OpenDesiredByRemote` messages. + // Since an `Open` and a `Close` messages have already been sent, + // there is nothing much that can be done about this anyway. + debug_assert!(matches!( + connec_state, + ConnectionState::OpeningThenClosing + )); + } } else { - debug_assert!(false); error!( target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source + "OpenDesiredByRemote: State mismatch in the custom protos handler" ); + debug_assert!(false); } - // TODO: We switch the entire peer state to "disabled" because of possible - // race conditions involving the legacy substream. - // Once https://github.com/paritytech/substrate/issues/5670 is done, this - // should be changed to stay in the `Enabled` state. 
- debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", source); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); - self.peerset.dropped(source.clone()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: source.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); + *entry.into_mut() = PeerState::Incoming { connections, backoff_until }; + }, - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) + PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { + if let ConnectionState::Closed = *connec_state { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + source, connection, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: source, + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Opening; } else { - None - }); - - *entry.into_mut() = PeerState::Disabled { - open, - banned_until: None - }; - - (last, new_notifications_sink) - }, - PeerState::Disabled { mut open, banned_until } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); + // Connections in `OpeningThenClosing` and `Opening` are in a Closed + // phase, and as such can emit `OpenDesiredByRemote` messages. + // Since an `Open` message haS already been sent, there is nothing + // more to do. + debug_assert!(matches!( + connec_state, + ConnectionState::OpenDesiredByRemote | ConnectionState::Opening + )); + } } else { - debug_assert!(false); error!( target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source + "OpenDesiredByRemote: State mismatch in the custom protos handler" ); + debug_assert!(false); } - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) - } else { - None - }); + *entry.into_mut() = PeerState::Enabled { connections }; + }, - *entry.into_mut() = PeerState::Disabled { - open, - banned_until - }; + // Disabled => Disabled | Incoming + PeerState::Disabled { mut connections, backoff_until } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { + if let ConnectionState::Closed = *connec_state { + *connec_state = ConnectionState::OpenDesiredByRemote; - (last, new_notifications_sink) - }, - PeerState::DisabledPendingEnable { - mut open, - timer, - timer_deadline - } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); + let incoming_id = self.next_incoming_index; + self.next_incoming_index.0 += 1; + + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + source, incoming_id); + self.peerset.incoming(set_id, source.clone(), incoming_id); + self.incoming.push(IncomingPeer { + peer_id: source.clone(), + set_id, + alive: true, + incoming_id, + }); + + *entry.into_mut() = PeerState::Incoming { connections, backoff_until }; + + } else { + // Connections in `OpeningThenClosing` are in a Closed phase, and + // as such can emit `OpenDesiredByRemote` messages. + // We ignore them. 
+ debug_assert!(matches!( + connec_state, + ConnectionState::OpeningThenClosing + )); + *entry.into_mut() = PeerState::Disabled { connections, backoff_until }; + } } else { - debug_assert!(false); error!( target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source + "OpenDesiredByRemote: State mismatch in the custom protos handler" ); + debug_assert!(false); } + } + + // DisabledPendingEnable => Enabled | DisabledPendingEnable + PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { + if let ConnectionState::Closed = *connec_state { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + source, connection, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: source.clone(), + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Opening; + + *entry.into_mut() = PeerState::Enabled { connections }; - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) } else { - None - }); + // Connections in `OpeningThenClosing` are in a Closed phase, and + // as such can emit `OpenDesiredByRemote` messages. + // We ignore them. + debug_assert!(matches!( + connec_state, + ConnectionState::OpeningThenClosing + )); + *entry.into_mut() = PeerState::DisabledPendingEnable { + connections, + timer, + timer_deadline, + }; + } + } else { + error!( + target: "sub-libp2p", + "OpenDesiredByRemote: State mismatch in the custom protos handler" + ); + debug_assert!(false); + } + } - *entry.into_mut() = PeerState::DisabledPendingEnable { - open, - timer, - timer_deadline + state => { + error!(target: "sub-libp2p", + "OpenDesiredByRemote: Unexpected state in the custom protos handler: {:?}", + state); + debug_assert!(false); + return + } + }; + } + + NotifsHandlerOut::CloseDesired { protocol_index } => { + let set_id = sc_peerset::SetId::from(protocol_index); + + debug!(target: "sub-libp2p", + "Handler({}, {:?}) => CloseDesired({:?})", + source, connection, set_id); + + let mut entry = if let Entry::Occupied(entry) = self.peers.entry((source.clone(), set_id)) { + entry + } else { + error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler"); + debug_assert!(false); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // Enabled => Enabled | Disabled + PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + + let pos = if let Some(pos) = connections.iter().position(|(c, _)| *c == connection) { + pos + } else { + error!(target: "sub-libp2p", + "CloseDesired: State mismatch in the custom protos handler"); + debug_assert!(false); + return; }; - (last, new_notifications_sink) + if matches!(connections[pos].1, ConnectionState::Closing) { + *entry.into_mut() = PeerState::Enabled { connections }; + return; + } + + debug_assert!(matches!(connections[pos].1, ConnectionState::Open(_))); + connections[pos].1 = ConnectionState::Closing; + + debug!(target: "sub-libp2p", "Handler({}, {:?}) <= Close({:?})", source, connection, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: source.clone(), + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Close { 
protocol_index: set_id.into() }, + }); + + if let Some((replacement_pos, replacement_sink)) = connections + .iter() + .enumerate() + .filter_map(|(num, (_, s))| { + match s { + ConnectionState::Open(s) => Some((num, s.clone())), + _ => None + } + }) + .next() + { + if pos <= replacement_pos { + debug!(target: "sub-libp2p", "External API <= Sink replaced({:?})", source); + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: source, + set_id, + notifications_sink: replacement_sink, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + + *entry.into_mut() = PeerState::Enabled { connections }; + + } else { + // List of open connections wasn't empty before but now it is. + if !connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening)) { + debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", source, set_id); + self.peerset.dropped(set_id, source.clone(), sc_peerset::DropReason::Refused); + *entry.into_mut() = PeerState::Disabled { + connections, backoff_until: None + }; + } else { + *entry.into_mut() = PeerState::Enabled { connections }; + } + + debug!(target: "sub-libp2p", "External API <= Closed({}, {:?})", source, set_id); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: source, + set_id, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + }, + + // All connections in `Disabled` and `DisabledPendingEnable` have been sent a + // `Close` message already, and as such ignore any `CloseDesired` message. + state @ PeerState::Disabled { .. } | + state @ PeerState::DisabledPendingEnable { .. } => { + *entry.into_mut() = state; + return; }, state => { error!(target: "sub-libp2p", @@ -1251,103 +1712,238 @@ impl NetworkBehaviour for GenericProto { state); return } - }; + } + } - if last { - debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); - let event = GenericProtoOut::CustomProtocolClosed { - reason, - peer_id: source, - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + NotifsHandlerOut::CloseResult { protocol_index } => { + let set_id = sc_peerset::SetId::from(protocol_index); - } else { - if let Some(new_notifications_sink) = new_notifications_sink { - let event = GenericProtoOut::CustomProtocolReplaced { - peer_id: source, - notifications_sink: new_notifications_sink, - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + debug!(target: "sub-libp2p", + "Handler({}, {:?}) => CloseResult({:?})", + source, connection, set_id); + + match self.peers.get_mut(&(source.clone(), set_id)) { + // Move the connection from `Closing` to `Closed`. + Some(PeerState::DisabledPendingEnable { connections, .. }) | + Some(PeerState::Disabled { connections, .. }) | + Some(PeerState::Enabled { connections, .. }) => { + if let Some((_, connec_state)) = connections + .iter_mut() + .find(|(c, s)| *c == connection && matches!(s, ConnectionState::Closing)) + { + *connec_state = ConnectionState::Closed; + } else { + error!(target: "sub-libp2p", + "CloseResult: State mismatch in the custom protos handler"); + debug_assert!(false); + } + }, + + state => { + error!(target: "sub-libp2p", + "CloseResult: Unexpected state in the custom protos handler: {:?}", + state); + debug_assert!(false); } - debug!(target: "sub-libp2p", "Secondary connection closed custom protocol."); } } - NotifsHandlerOut::Open { endpoint, received_handshake, notifications_sink } => { + NotifsHandlerOut::OpenResultOk { protocol_index, received_handshake, notifications_sink, .. 
} => { + let set_id = sc_peerset::SetId::from(protocol_index); debug!(target: "sub-libp2p", - "Handler({:?}) => Endpoint {:?} open for custom protocols.", - source, endpoint); - - let first = match self.peers.get_mut(&source) { - Some(PeerState::Enabled { ref mut open, .. }) | - Some(PeerState::DisabledPendingEnable { ref mut open, .. }) | - Some(PeerState::Disabled { ref mut open, .. }) => { - let first = open.is_empty(); - if !open.iter().any(|(c, _)| *c == connection) { - open.push((connection, notifications_sink.clone())); + "Handler({}, {:?}) => OpenResultOk({:?})", + source, connection, set_id); + + match self.peers.get_mut(&(source.clone(), set_id)) { + Some(PeerState::Enabled { connections, .. }) => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + let any_open = connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))); + + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::Opening)) + { + if !any_open { + debug!(target: "sub-libp2p", "External API <= Open({:?})", source); + let event = GenericProtoOut::CustomProtocolOpen { + peer_id: source, + set_id, + received_handshake, + notifications_sink: notifications_sink.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + *connec_state = ConnectionState::Open(notifications_sink); + } else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) + { + *connec_state = ConnectionState::Closing; } else { - error!( - target: "sub-libp2p", - "State mismatch: connection with {} opened a second time", - source - ); + debug_assert!(false); + error!(target: "sub-libp2p", + "OpenResultOk State mismatch in the custom protos handler"); + } + }, + + Some(PeerState::DisabledPendingEnable { connections, .. }) | + Some(PeerState::Disabled { connections, .. 
}) => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) + { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultOk State mismatch in the custom protos handler"); + debug_assert!(false); } - first } + state => { error!(target: "sub-libp2p", - "Open: Unexpected state in the custom protos handler: {:?}", + "OpenResultOk: Unexpected state in the custom protos handler: {:?}", state); + debug_assert!(false); return } + } + } + + NotifsHandlerOut::OpenResultErr { protocol_index } => { + let set_id = sc_peerset::SetId::from(protocol_index); + debug!(target: "sub-libp2p", + "Handler({:?}, {:?}) => OpenResultErr({:?})", + source, connection, set_id); + + let mut entry = if let Entry::Occupied(entry) = self.peers.entry((source.clone(), set_id)) { + entry + } else { + error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); + debug_assert!(false); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::Opening)) + { + *connec_state = ConnectionState::Closed; + } else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) + { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + if !connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_))) + { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); + self.peerset.dropped(set_id, source.clone(), sc_peerset::DropReason::Refused); + + *entry.into_mut() = PeerState::Disabled { + connections, + backoff_until: None + }; + } else { + *entry.into_mut() = PeerState::Enabled { connections }; + } + }, + PeerState::Disabled { mut connections, backoff_until } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) + { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + *entry.into_mut() = PeerState::Disabled { connections, backoff_until }; + }, + PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) + { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + *entry.into_mut() = PeerState::DisabledPendingEnable { + connections, + timer, + timer_deadline, + }; + }, + state => { + error!(target: "sub-libp2p", + "Unexpected state in the custom protos handler: {:?}", + state); + debug_assert!(false); + } }; + } - if first { - debug!(target: "sub-libp2p", "External API <= Open({:?})", source); - let event = GenericProtoOut::CustomProtocolOpen { + NotifsHandlerOut::CustomMessage { message } => { + 
if self.is_open(&source, sc_peerset::SetId::from(0)) { // TODO: using set 0 here is hacky + trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); + trace!(target: "sub-libp2p", "External API <= Message({:?})", source); + let event = GenericProtoOut::LegacyMessage { peer_id: source, - received_handshake, - notifications_sink + message, }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); } else { - debug!( + trace!( target: "sub-libp2p", - "Handler({:?}) => Secondary connection opened custom protocol", - source + "Handler({:?}) => Post-close message. Dropping message.", + source, ); } } - NotifsHandlerOut::CustomMessage { message } => { - debug_assert!(self.is_open(&source)); - trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); - trace!(target: "sub-libp2p", "External API <= Message({:?})", source); - let event = GenericProtoOut::LegacyMessage { - peer_id: source, - message, - }; - - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - - NotifsHandlerOut::Notification { protocol_name, message } => { - debug_assert!(self.is_open(&source)); - trace!( - target: "sub-libp2p", - "Handler({:?}) => Notification({:?})", - source, - protocol_name, - ); - trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); - let event = GenericProtoOut::Notification { - peer_id: source, - protocol_name, - message, - }; + NotifsHandlerOut::Notification { protocol_index, message } => { + let set_id = sc_peerset::SetId::from(protocol_index); + if self.is_open(&source, set_id) { + trace!( + target: "sub-libp2p", + "Handler({:?}) => Notification({}, {:?}, {} bytes)", + connection, + source, + set_id, + message.len() + ); + trace!(target: "sub-libp2p", "External API <= Message({}, {:?})", + source, set_id); + let event = GenericProtoOut::Notification { + peer_id: source, + set_id, + message, + }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } else { + trace!( + target: "sub-libp2p", + "Handler({:?}) => Post-close notification({}, {:?}, {} bytes)", + connection, + source, + set_id, + message.len() + ); + } } } } @@ -1376,11 +1972,11 @@ impl NetworkBehaviour for GenericProto { Poll::Ready(Some(sc_peerset::Message::Reject(index))) => { self.peerset_report_reject(index); } - Poll::Ready(Some(sc_peerset::Message::Connect(id))) => { - self.peerset_report_connect(id); + Poll::Ready(Some(sc_peerset::Message::Connect { peer_id, set_id, .. })) => { + self.peerset_report_connect(peer_id, set_id); } - Poll::Ready(Some(sc_peerset::Message::Drop(id))) => { - self.peerset_report_disconnect(id); + Poll::Ready(Some(sc_peerset::Message::Drop { peer_id, set_id, .. })) => { + self.peerset_report_disconnect(peer_id, set_id); } Poll::Ready(None) => { error!(target: "sub-libp2p", "Peerset receiver stream has returned None"); @@ -1390,9 +1986,9 @@ impl NetworkBehaviour for GenericProto { } } - while let Poll::Ready(Some((delay_id, peer_id))) = + while let Poll::Ready(Some((delay_id, peer_id, set_id))) = Pin::new(&mut self.delays).poll_next(cx) { - let peer_state = match self.peers.get_mut(&peer_id) { + let peer_state = match self.peers.get_mut(&(peer_id.clone(), set_id)) { Some(s) => s, // We intentionally never remove elements from `delays`, and it may // thus contain peers which are now gone. This is a normal situation. 
@@ -1400,8 +1996,14 @@ impl NetworkBehaviour for GenericProto { }; match peer_state { + PeerState::Backoff { timer, .. } if *timer == delay_id => { + debug!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state", peer_id); + self.peers.remove(&(peer_id, set_id)); + } + PeerState::PendingRequest { timer, .. } if *timer == delay_id => { debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); + // The `DialPeerCondition` ensures that dial attempts are de-duplicated self.events.push_back(NetworkBehaviourAction::DialPeer { peer_id, condition: DialPeerCondition::Disconnected @@ -1409,14 +2011,33 @@ impl NetworkBehaviour for GenericProto { *peer_state = PeerState::Requested; } - PeerState::DisabledPendingEnable { timer, open, .. } if *timer == delay_id => { - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable (ban expired)", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *peer_state = PeerState::Enabled { open: mem::replace(open, Default::default()) }; + PeerState::DisabledPendingEnable { connections, timer, timer_deadline } + if *timer == delay_id => + { + // The first element of `closed` is chosen to open the notifications substream. + if let Some((connec_id, connec_state)) = connections.iter_mut() + .find(|(_, s)| matches!(s, ConnectionState::Closed)) + { + debug!(target: "sub-libp2p", "Handler({}, {:?}) <= Open({:?}) (ban expired)", + peer_id, *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Opening; + *peer_state = PeerState::Enabled { + connections: mem::replace(connections, Default::default()), + }; + } else { + *timer_deadline = Instant::now() + Duration::from_secs(5); + let delay = futures_timer::Delay::new(Duration::from_secs(5)); + let timer = *timer; + self.delays.push(async move { + delay.await; + (timer, peer_id, set_id) + }.boxed()); + } } // We intentionally never remove elements from `delays`, and it may diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index 5845130a7db87..6fdcef1d7a2a1 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,12 +16,935 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -pub use self::group::{ - NotificationsSink, NotifsHandlerError, Ready, NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut +//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming +//! and outgoing substreams for all gossiping protocols. +//! +//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the +//! gossiping protocols that are Substrate-related and outside of the scope of libp2p. +//! +//! # Usage +//! +//! From an API perspective, for each of its protocols, the [`NotifsHandler`] is always in one of +//! 
the following state (see [`State`]): +//! +//! - Closed substream. This is the initial state. +//! - Closed substream, but remote desires them to be open. +//! - Open substream. +//! - Open substream, but remote desires them to be closed. +//! +//! Each protocol in the [`NotifsHandler`] can spontaneously switch between these states: +//! +//! - "Closed substream" to "Closed substream but open desired". When that happens, a +//! [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted. +//! - "Closed substream but open desired" to "Closed substream" (i.e. the remote has cancelled +//! their request). When that happens, a [`NotifsHandlerOut::CloseDesired`] is emitted. +//! - "Open substream" to "Open substream but close desired". When that happens, a +//! [`NotifsHandlerOut::CloseDesired`] is emitted. +//! +//! The user can instruct a protocol in the `NotifsHandler` to switch from "closed" to "open" or +//! vice-versa by sending either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`]. The +//! `NotifsHandler` must answer with [`NotifsHandlerOut::OpenResultOk`] or +//! [`NotifsHandlerOut::OpenResultErr`], or with [`NotifsHandlerOut::CloseResult`]. +//! +//! When a [`NotifsHandlerOut::OpenResultOk`] is emitted, the substream is now in the open state. +//! When a [`NotifsHandlerOut::OpenResultErr`] or [`NotifsHandlerOut::CloseResult`] is emitted, +//! the `NotifsHandler` is now (or remains) in the closed state. +//! +//! When a [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted, the user should always send back +//! either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`].If this isn't done, the +//! remote will be left in a pending state. +//! +//! It is illegal to send a [`NotifsHandlerIn::Open`] before a previously-emitted +//! [`NotifsHandlerIn::Open`] has gotten an answer. + +use crate::protocol::generic_proto::{ + upgrade::{ + NotificationsIn, NotificationsOut, NotificationsInSubstream, NotificationsOutSubstream, + NotificationsHandshakeError, RegisteredProtocol, RegisteredProtocolSubstream, + RegisteredProtocolEvent, UpgradeCollec + }, +}; + +use bytes::BytesMut; +use libp2p::core::{either::EitherOutput, ConnectedPoint, PeerId}; +use libp2p::core::upgrade::{SelectUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, + NegotiatedSubstream, +}; +use futures::{ + channel::mpsc, + lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard}, + prelude::* }; -pub use self::legacy::ConnectionKillError as LegacyConnectionKillError; +use log::error; +use parking_lot::{Mutex, RwLock}; +use smallvec::SmallVec; +use std::{borrow::Cow, collections::VecDeque, mem, pin::Pin, str, sync::Arc, task::{Context, Poll}, time::Duration}; +use wasm_timer::Instant; + +/// Number of pending notifications in asynchronous contexts. +/// See [`NotificationsSink::reserve_notification`] for context. +const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; + +/// Number of pending notifications in synchronous contexts. +const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; + +/// Maximum duration to open a substream and receive the handshake message. After that, we +/// consider that we failed to open the substream. 
+const OPEN_TIMEOUT: Duration = Duration::from_secs(10); + +/// After successfully establishing a connection with the remote, we keep the connection open for +/// at least this amount of time in order to give the rest of the code the chance to notify us to +/// open substreams. +const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsHandler`]. +/// +/// See the documentation at the module level for more information. +pub struct NotifsHandlerProto { + /// Name of protocols, prototypes for upgrades for inbound substreams, and the message we + /// send or respond with in the handshake. + protocols: Vec<(Cow<'static, str>, NotificationsIn, Arc>>, u64)>, + + /// Configuration for the legacy protocol upgrade. + legacy_protocol: RegisteredProtocol, +} + +/// The actual handler once the connection has been established. +/// +/// See the documentation at the module level for more information. +pub struct NotifsHandler { + /// List of notification protocols, specified by the user at initialization. + protocols: Vec, + + /// When the connection with the remote has been successfully established. + when_connection_open: Instant, + + /// Whether we are the connection dialer or listener. + endpoint: ConnectedPoint, + + /// Remote we are connected to. + peer_id: PeerId, + + /// Configuration for the legacy protocol upgrade. + legacy_protocol: RegisteredProtocol, + + /// The substreams where bidirectional communications happen. + legacy_substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, + + /// Contains substreams which are being shut down. + legacy_shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, + + /// Events to return in priority from `poll`. + events_queue: VecDeque< + ProtocolsHandlerEvent + >, +} + +/// Fields specific for each individual protocol. +struct Protocol { + /// Name of the protocol. + name: Cow<'static, str>, + + /// Prototype for the inbound upgrade. + in_upgrade: NotificationsIn, + + /// Handshake to send when opening a substream or receiving an open request. + handshake: Arc>>, + + /// Maximum allowed size of individual notifications. + max_notification_size: u64, + + /// Current state of the substreams for this protocol. + state: State, +} + +/// See the module-level documentation to learn about the meaning of these variants. +enum State { + /// Protocol is in the "Closed" state. + Closed { + /// True if an outgoing substream is still in the process of being opened. + pending_opening: bool, + }, + + /// Protocol is in the "Closed" state. A [`NotifsHandlerOut::OpenDesiredByRemote`] has been + /// emitted. + OpenDesiredByRemote { + /// Substream opened by the remote and that hasn't been accepted/rejected yet. + in_substream: NotificationsInSubstream, + + /// See [`State::Closed::pending_opening`]. + pending_opening: bool, + }, + + /// Protocol is in the "Closed" state, but has received a [`NotifsHandlerIn::Open`] and is + /// consequently trying to open the various notifications substreams. + /// + /// A [`NotifsHandlerOut::OpenResultOk`] or a [`NotifsHandlerOut::OpenResultErr`] event must + /// be emitted when transitionning to respectively [`State::Open`] or [`State::Closed`]. + Opening { + /// Substream opened by the remote. If `Some`, has been accepted. 
+ in_substream: Option>, + }, + + /// Protocol is in the "Open" state. + Open { + /// Contains the two `Receiver`s connected to the [`NotificationsSink`] that has been + /// sent out. The notifications to send out can be pulled from this receivers. + /// We use two different channels in order to have two different channel sizes, but from + /// the receiving point of view, the two channels are the same. + /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. + notifications_sink_rx: stream::Select< + stream::Fuse>, + stream::Fuse> + >, + + /// Outbound substream that has been accepted by the remote. + /// + /// Always `Some` on transition to [`State::Open`]. Switched to `None` only if the remote + /// closed the substream. If `None`, a [`NotifsHandlerOut::CloseDesired`] event has been + /// emitted. + out_substream: Option>, + + /// Substream opened by the remote. + /// + /// Contrary to the `out_substream` field, operations continue as normal even if the + /// substream has been closed by the remote. A `None` is treated the same way as if there + /// was an idle substream. + in_substream: Option>, + }, +} + +impl IntoProtocolsHandler for NotifsHandlerProto { + type Handler = NotifsHandler; + + fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { + let protocols = self.protocols.iter() + .map(|(_, p, _, _)| p.clone()) + .collect::>(); + + SelectUpgrade::new(protocols, self.legacy_protocol.clone()) + } + + fn into_handler(self, peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + NotifsHandler { + protocols: self.protocols.into_iter().map(|(name, in_upgrade, handshake, max_size)| { + Protocol { + name, + in_upgrade, + handshake, + state: State::Closed { + pending_opening: false, + }, + max_notification_size: max_size, + } + }).collect(), + peer_id: peer_id.clone(), + endpoint: connected_point.clone(), + when_connection_open: Instant::now(), + legacy_protocol: self.legacy_protocol, + legacy_substreams: SmallVec::new(), + legacy_shutdown: SmallVec::new(), + events_queue: VecDeque::with_capacity(16), + } + } +} + +/// Event that can be received by a `NotifsHandler`. +#[derive(Debug, Clone)] +pub enum NotifsHandlerIn { + /// Instruct the handler to open the notification substreams. + /// + /// Must always be answered by a [`NotifsHandlerOut::OpenResultOk`] or a + /// [`NotifsHandlerOut::OpenResultErr`] event. + /// + /// Importantly, it is forbidden to send a [`NotifsHandlerIn::Open`] while a previous one is + /// already in the fly. It is however possible if a `Close` is still in the fly. + Open { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, + + /// Instruct the handler to close the notification substreams, or reject any pending incoming + /// substream request. + /// + /// Must always be answered by a [`NotifsHandlerOut::CloseResult`] event. + Close { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, +} + +/// Event that can be emitted by a `NotifsHandler`. +#[derive(Debug)] +pub enum NotifsHandlerOut { + /// Acknowledges a [`NotifsHandlerIn::Open`]. + OpenResultOk { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + /// The endpoint of the connection that is open for custom protocols. + endpoint: ConnectedPoint, + /// Handshake that was sent to us. + /// This is normally a "Status" message, but this out of the concern of this code. 
+ received_handshake: Vec, + /// How notifications can be sent to this node. + notifications_sink: NotificationsSink, + }, + + /// Acknowledges a [`NotifsHandlerIn::Open`]. The remote has refused the attempt to open + /// notification substreams. + OpenResultErr { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, + + /// Acknowledges a [`NotifsHandlerIn::Close`]. + CloseResult { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, + + /// The remote would like the substreams to be open. Send a [`NotifsHandlerIn::Open`] or a + /// [`NotifsHandlerIn::Close`] in order to either accept or deny this request. If a + /// [`NotifsHandlerIn::Open`] or [`NotifsHandlerIn::Close`] has been sent before and has not + /// yet been acknowledged by a matching [`NotifsHandlerOut`], then you don't need to a send + /// another [`NotifsHandlerIn`]. + OpenDesiredByRemote { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, + + /// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in + /// order to close them. If a [`NotifsHandlerIn::Close`] has been sent before and has not yet + /// been acknowledged by a [`NotifsHandlerOut::CloseResult`], then you don't need to a send + /// another one. + CloseDesired { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, + + /// Received a non-gossiping message on the legacy substream. + /// + /// Can only happen when the handler is in the open state. + CustomMessage { + /// Message that has been received. + /// + /// Keep in mind that this can be a `ConsensusMessage` message, which then contains a + /// notification. + message: BytesMut, + }, + + /// Received a message on a custom protocol substream. + /// + /// Can only happen when the handler is in the open state. + Notification { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + /// Message that has been received. + message: BytesMut, + }, +} + +/// Sink connected directly to the node background task. Allows sending notifications to the peer. +/// +/// Can be cloned in order to obtain multiple references to the substream of the same peer. +#[derive(Debug, Clone)] +pub struct NotificationsSink { + inner: Arc, +} + +#[derive(Debug)] +struct NotificationsSinkInner { + /// Target of the sink. + peer_id: PeerId, + /// Sender to use in asynchronous contexts. Uses an asynchronous mutex. + async_channel: FuturesMutex>, + /// Sender to use in synchronous contexts. Uses a synchronous mutex. + /// This channel has a large capacity and is meant to be used in contexts where + /// back-pressure cannot be properly exerted. + /// It will be removed in a future version. + sync_channel: Mutex>, +} + +/// Message emitted through the [`NotificationsSink`] and processed by the background task +/// dedicated to the peer. +#[derive(Debug)] +enum NotificationsSinkMessage { + /// Message emitted by [`NotificationsSink::reserve_notification`] and + /// [`NotificationsSink::write_notification_now`]. + Notification { + message: Vec, + }, + + /// Must close the connection. + ForceClose, +} + +impl NotificationsSink { + /// Returns the [`PeerId`] the sink is connected to. + pub fn peer_id(&self) -> &PeerId { + &self.inner.peer_id + } + + /// Sends a notification to the peer. 
+ /// + /// If too many messages are already buffered, the notification is silently discarded and the + /// connection to the peer will be closed shortly after. + /// + /// The protocol name is expected to be checked ahead of calling this method. It is a logic + /// error to send a notification using an unknown protocol. + /// + /// This method will be removed in a future version. + pub fn send_sync_notification<'a>( + &'a self, + message: impl Into> + ) { + let mut lock = self.inner.sync_channel.lock(); + let result = lock.try_send(NotificationsSinkMessage::Notification { + message: message.into() + }); + + if result.is_err() { + // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the + // buffer, and therefore `try_send` will succeed. + let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); + debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); + } + } + + /// Wait until the remote is ready to accept a notification. + /// + /// Returns an error in the case where the connection is closed. + /// + /// The protocol name is expected to be checked ahead of calling this method. It is a logic + /// error to send a notification using an unknown protocol. + pub async fn reserve_notification<'a>(&'a self) -> Result, ()> { + let mut lock = self.inner.async_channel.lock().await; + + let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await; + if poll_ready.is_ok() { + Ok(Ready { lock }) + } else { + Err(()) + } + } +} + +/// Notification slot is reserved and the notification can actually be sent. +#[must_use] +#[derive(Debug)] +pub struct Ready<'a> { + /// Guarded channel. The channel inside is guaranteed to not be full. + lock: FuturesMutexGuard<'a, mpsc::Sender>, +} + +impl<'a> Ready<'a> { + /// Consumes this slots reservation and actually queues the notification. + /// + /// Returns an error if the substream has been closed. + pub fn send( + mut self, + notification: impl Into> + ) -> Result<(), ()> { + self.lock.start_send(NotificationsSinkMessage::Notification { + message: notification.into(), + }).map_err(|_| ()) + } +} + +/// Error specific to the collection of protocols. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum NotifsHandlerError { + /// Channel of synchronous notifications is full. + SyncNotificationsClogged, +} + +impl NotifsHandlerProto { + /// Builds a new handler. + /// + /// `list` is a list of notification protocols names, the message to send as part of the + /// handshake, and the maximum allowed size of a notification. At the moment, the message + /// is always the same whether we open a substream ourselves or respond to handshake from + /// the remote. + pub fn new( + legacy_protocol: RegisteredProtocol, + list: impl Into, Arc>>, u64)>>, + ) -> Self { + let protocols = list + .into() + .into_iter() + .map(|(proto_name, msg, max_notif_size)| { + (proto_name.clone(), NotificationsIn::new(proto_name, max_notif_size), msg, max_notif_size) + }) + .collect(); + + NotifsHandlerProto { + protocols, + legacy_protocol, + } + } +} + +impl ProtocolsHandler for NotifsHandler { + type InEvent = NotifsHandlerIn; + type OutEvent = NotifsHandlerOut; + type Error = NotifsHandlerError; + type InboundProtocol = SelectUpgrade, RegisteredProtocol>; + type OutboundProtocol = NotificationsOut; + // Index within the `out_protocols`. 
+ type OutboundOpenInfo = usize; + type InboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + let protocols = self.protocols.iter() + .map(|p| p.in_upgrade.clone()) + .collect::>(); + + let with_legacy = SelectUpgrade::new(protocols, self.legacy_protocol.clone()); + SubstreamProtocol::new(with_legacy, ()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + out: >::Output, + (): () + ) { + match out { + // Received notifications substream. + EitherOutput::First(((_remote_handshake, mut new_substream), protocol_index)) => { + let mut protocol_info = &mut self.protocols[protocol_index]; + match protocol_info.state { + State::Closed { pending_opening } => { + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index, + } + )); + + protocol_info.state = State::OpenDesiredByRemote { + in_substream: new_substream, + pending_opening, + }; + }, + State::OpenDesiredByRemote { .. } => { + // If a substream already exists, silently drop the new one. + // Note that we drop the substream, which will send an equivalent to a + // TCP "RST" to the remote and force-close the substream. It might + // seem like an unclean way to get rid of a substream. However, keep + // in mind that it is invalid for the remote to open multiple such + // substreams, and therefore sending a "RST" is the most correct thing + // to do. + return; + }, + State::Opening { ref mut in_substream, .. } | + State::Open { ref mut in_substream, .. } => { + if in_substream.is_some() { + // Same remark as above. + return; + } + + // Create `handshake_message` on a separate line to be sure that the + // lock is released as soon as possible. + let handshake_message = protocol_info.handshake.read().clone(); + new_substream.send_handshake(handshake_message); + *in_substream = Some(new_substream); + }, + }; + } + + // Received legacy substream. + EitherOutput::Second((substream, _handshake)) => { + // Note: while we awknowledge legacy substreams and handle incoming messages, + // it doesn't trigger any `OpenDesiredByRemote` event as a way to simplify the + // logic of this code. + // Since mid-2019, legacy substreams are supposed to be used at the same time as + // notifications substreams, and not in isolation. Nodes that open legacy + // substreams in isolation are considered deprecated. + if self.legacy_substreams.len() <= 4 { + self.legacy_substreams.push(substream); + } + }, + } + } + + fn inject_fully_negotiated_outbound( + &mut self, + (handshake, substream): >::Output, + protocol_index: Self::OutboundOpenInfo + ) { + match self.protocols[protocol_index].state { + State::Closed { ref mut pending_opening } | + State::OpenDesiredByRemote { ref mut pending_opening, .. } => { + debug_assert!(*pending_opening); + *pending_opening = false; + } + State::Open { .. 
} => { + error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler"); + debug_assert!(false); + } + State::Opening { ref mut in_substream } => { + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + peer_id: self.peer_id.clone(), + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + self.protocols[protocol_index].state = State::Open { + notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), + out_substream: Some(substream), + in_substream: in_substream.take(), + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultOk { + protocol_index, + endpoint: self.endpoint.clone(), + received_handshake: handshake, + notifications_sink + } + )); + } + } + } + + fn inject_event(&mut self, message: NotifsHandlerIn) { + match message { + NotifsHandlerIn::Open { protocol_index } => { + let protocol_info = &mut self.protocols[protocol_index]; + match &mut protocol_info.state { + State::Closed { pending_opening } => { + if !*pending_opening { + let proto = NotificationsOut::new( + protocol_info.name.clone(), + protocol_info.handshake.read().clone(), + protocol_info.max_notification_size + ); + + self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto, protocol_index) + .with_timeout(OPEN_TIMEOUT), + }); + } + + protocol_info.state = State::Opening { + in_substream: None, + }; + }, + State::OpenDesiredByRemote { pending_opening, in_substream } => { + let handshake_message = protocol_info.handshake.read().clone(); + + if !*pending_opening { + let proto = NotificationsOut::new( + protocol_info.name.clone(), + handshake_message.clone(), + protocol_info.max_notification_size, + ); + + self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto, protocol_index) + .with_timeout(OPEN_TIMEOUT), + }); + } + + in_substream.send_handshake(handshake_message); + + // The state change is done in two steps because of borrowing issues. + let in_substream = match + mem::replace(&mut protocol_info.state, State::Opening { in_substream: None }) + { + State::OpenDesiredByRemote { in_substream, .. } => in_substream, + _ => unreachable!() + }; + protocol_info.state = State::Opening { + in_substream: Some(in_substream), + }; + }, + State::Opening { .. } | + State::Open { .. } => { + // As documented, it is forbidden to send an `Open` while there is already + // one in the fly. + error!(target: "sub-libp2p", "opening already-opened handler"); + debug_assert!(false); + }, + } + }, + + NotifsHandlerIn::Close { protocol_index } => { + for mut substream in self.legacy_substreams.drain(..) { + substream.shutdown(); + self.legacy_shutdown.push(substream); + } + + match self.protocols[protocol_index].state { + State::Open { .. } => { + self.protocols[protocol_index].state = State::Closed { + pending_opening: false, + }; + }, + State::Opening { .. } => { + self.protocols[protocol_index].state = State::Closed { + pending_opening: true, + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr { + protocol_index, + } + )); + }, + State::OpenDesiredByRemote { pending_opening, .. 
} => { + self.protocols[protocol_index].state = State::Closed { + pending_opening, + }; + } + State::Closed { .. } => {}, + } + + self.events_queue.push_back( + ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseResult { + protocol_index, + }) + ); + }, + } + } + + fn inject_dial_upgrade_error( + &mut self, + num: usize, + _: ProtocolsHandlerUpgrErr + ) { + match self.protocols[num].state { + State::Closed { ref mut pending_opening } | + State::OpenDesiredByRemote { ref mut pending_opening, .. } => { + debug_assert!(*pending_opening); + *pending_opening = false; + } + + State::Opening { .. } => { + self.protocols[num].state = State::Closed { + pending_opening: false, + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr { + protocol_index: num, + } + )); + } + + // No substream is being open when already `Open`. + State::Open { .. } => debug_assert!(false), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + if !self.legacy_substreams.is_empty() { + return KeepAlive::Yes; + } + + // `Yes` if any protocol has some activity. + if self.protocols.iter().any(|p| !matches!(p.state, State::Closed { .. })) { + return KeepAlive::Yes; + } + + // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote + // to express desire to open substreams. + KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME) + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent + > { + if let Some(ev) = self.events_queue.pop_front() { + return Poll::Ready(ev); + } + + for protocol_index in 0..self.protocols.len() { + // Poll inbound substreams. + // Inbound substreams being closed is always tolerated, except for the + // `OpenDesiredByRemote` state which might need to be switched back to `Closed`. + match &mut self.protocols[protocol_index].state { + State::Closed { .. } | + State::Open { in_substream: None, .. } | + State::Opening { in_substream: None } => {} + + State::Open { in_substream: in_substream @ Some(_), .. } => { + match Stream::poll_next(Pin::new(in_substream.as_mut().unwrap()), cx) { + Poll::Pending => {}, + Poll::Ready(Some(Ok(message))) => { + let event = NotifsHandlerOut::Notification { + protocol_index, + message, + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(event)) + }, + Poll::Ready(None) | Poll::Ready(Some(Err(_))) => + *in_substream = None, + } + } + + State::OpenDesiredByRemote { in_substream, pending_opening } => { + match NotificationsInSubstream::poll_process(Pin::new(in_substream), cx) { + Poll::Pending => {}, + Poll::Ready(Ok(void)) => match void {}, + Poll::Ready(Err(_)) => { + self.protocols[protocol_index].state = State::Closed { + pending_opening: *pending_opening, + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired { protocol_index } + )) + }, + } + } + + State::Opening { in_substream: in_substream @ Some(_), .. } => { + match NotificationsInSubstream::poll_process(Pin::new(in_substream.as_mut().unwrap()), cx) { + Poll::Pending => {}, + Poll::Ready(Ok(void)) => match void {}, + Poll::Ready(Err(_)) => *in_substream = None, + } + } + } + + // Poll outbound substream. + match &mut self.protocols[protocol_index].state { + State::Open { out_substream: out_substream @ Some(_), .. 
} => { + match Sink::poll_flush(Pin::new(out_substream.as_mut().unwrap()), cx) { + Poll::Pending | Poll::Ready(Ok(())) => {}, + Poll::Ready(Err(_)) => { + *out_substream = None; + let event = NotifsHandlerOut::CloseDesired { protocol_index }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(event)); + } + }; + } + + State::Closed { .. } | + State::Opening { .. } | + State::Open { out_substream: None, .. } | + State::OpenDesiredByRemote { .. } => {} + } + + if let State::Open { notifications_sink_rx, out_substream: Some(out_substream), .. } + = &mut self.protocols[protocol_index].state + { + loop { + // Before we poll the notifications sink receiver, check that the substream + // is ready to accept a message. + match out_substream.poll_ready_unpin(cx) { + Poll::Ready(_) => {}, + Poll::Pending => break + } + + // Now that all substreams are ready for a message, grab what to send. + let message = match notifications_sink_rx.poll_next_unpin(cx) { + Poll::Ready(Some(msg)) => msg, + Poll::Ready(None) | Poll::Pending => break, + }; + + match message { + NotificationsSinkMessage::Notification { message } => { + let _ = out_substream.start_send_unpin(message); + + // Calling `start_send_unpin` only queues the message. Actually + // emitting the message is done with `poll_flush`. In order to + // not introduce too much complexity, this flushing is done earlier + // in the body of this `poll()` method. As such, we schedule a task + // wake-up now in order to guarantee that `poll()` will be called + // again and the flush happening. + // At the time of the writing of this comment, a rewrite of this + // code is being planned. If you find this comment in the wild and + // the rewrite didn't happen, please consider a refactor. + cx.waker().wake_by_ref(); + } + NotificationsSinkMessage::ForceClose => { + return Poll::Ready( + ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged) + ); + } + } + } + } + + // The legacy substreams are polled only if the state is `Open`. Otherwise, it would be + // possible to receive notifications that would need to get silently discarded. + if matches!(self.protocols[0].state, State::Open { .. }) { + for n in (0..self.legacy_substreams.len()).rev() { + let mut substream = self.legacy_substreams.swap_remove(n); + let poll_outcome = Pin::new(&mut substream).poll_next(cx); + match poll_outcome { + Poll::Pending => self.legacy_substreams.push(substream), + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { + self.legacy_substreams.push(substream); + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomMessage { message } + )) + }, + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { + return Poll::Ready(ProtocolsHandlerEvent::Close( + NotifsHandlerError::SyncNotificationsClogged + )) + } + Poll::Ready(None) | Poll::Ready(Some(Err(_))) => { + if matches!(poll_outcome, Poll::Ready(None)) { + self.legacy_shutdown.push(substream); + } + + if let State::Open { out_substream, .. } = &mut self.protocols[0].state { + if !out_substream.is_some() { + *out_substream = None; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired { + protocol_index: 0, + } + )) + } + } + } + } + } + } + } + + shutdown_list(&mut self.legacy_shutdown, cx); + + Poll::Pending + } +} -mod group; -mod legacy; -mod notif_in; -mod notif_out; +/// Given a list of substreams, tries to shut them down. The substreams that have been successfully +/// shut down are removed from the list. 
+fn shutdown_list + (list: &mut SmallVec>>, + cx: &mut Context) +{ + 'outer: for n in (0..list.len()).rev() { + let mut substream = list.swap_remove(n); + loop { + match substream.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(_))) => {} + Poll::Pending => break, + Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, + } + } + list.push(substream); + } +} diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs deleted file mode 100644 index fbfdb1cb6ab0e..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ /dev/null @@ -1,737 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming -//! and outgoing substreams for all gossiping protocols together. -//! -//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the -//! protocols that are Substrate-related and outside of the scope of libp2p. -//! -//! # Usage -//! -//! The handler can be in one of the following states: `Initial`, `Enabled`, `Disabled`. -//! -//! The `Initial` state is the state that the handler initially is in. It is a temporary state -//! during which the user must either enable or disable the handler. After that, the handler stays -//! either enabled or disabled. -//! -//! On the wire, we try to open the following substreams: -//! -//! - One substream for each notification protocol passed as parameter to the -//! `NotifsHandlerProto::new` function. -//! - One "legacy" substream used for anything non-related to gossiping, and used as a fallback -//! in case the notification protocol can't be opened. -//! -//! When the handler is in the `Enabled` state, we immediately open and try to maintain all the -//! aforementioned substreams. When the handler is in the `Disabled` state, we immediately close -//! (or abort opening) all these substreams. It is intended that in the future we allow states in -//! which some protocols are open and not others. Symmetrically, we allow incoming -//! Substrate-related substreams if and only if we are in the `Enabled` state. -//! -//! The user has the choice between sending a message with `SendNotification`, to send a -//! notification, and `SendLegacy`, to send any other kind of message. -//! 
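
The module documentation of the deleted `group.rs` above describes the old handler as a three-state machine (`Initial`, `Enabled`, `Disabled`) that buffers inbound substream requests until the user has decided whether to enable or disable the handler. A minimal, self-contained sketch of that buffering pattern, using hypothetical names rather than the Substrate types:

```rust
/// Sketch of the enable/disable state machine described in the deleted module docs.
/// `Initial` is temporary: the user must switch the handler to `Enabled` or `Disabled`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum EnabledState {
    Initial,
    Enabled,
    Disabled,
}

struct HandlerSketch {
    state: EnabledState,
    /// Inbound substream requests that arrived while still `Initial`; they are
    /// answered (accepted or refused) only once the user makes a decision.
    pending_in: Vec<usize>,
}

impl HandlerSketch {
    fn new() -> Self {
        Self { state: EnabledState::Initial, pending_in: Vec::new() }
    }

    fn on_inbound_request(&mut self, protocol_index: usize) {
        match self.state {
            // Too early to answer: remember the request.
            EnabledState::Initial => self.pending_in.push(protocol_index),
            // Already decided: the real handler would accept or refuse immediately.
            EnabledState::Enabled | EnabledState::Disabled => {}
        }
    }

    /// Enabling flushes the buffered requests; the real handler would `Accept` them.
    fn enable(&mut self) -> Vec<usize> {
        self.state = EnabledState::Enabled;
        std::mem::take(&mut self.pending_in)
    }

    /// Disabling flushes them too; the real handler would `Refuse` them.
    fn disable(&mut self) -> Vec<usize> {
        self.state = EnabledState::Disabled;
        std::mem::take(&mut self.pending_in)
    }
}

fn main() {
    let mut handler = HandlerSketch::new();
    handler.on_inbound_request(0); // request arrives before the user decided anything
    assert_eq!(handler.enable(), vec![0]);
    assert!(handler.disable().is_empty());
}
```

The new `handler.rs` added earlier in this diff replaces this per-connection enable/disable model with an explicit per-protocol state (`Closed`, `Opening`, `Open`, `OpenDesiredByRemote`), which is why `group.rs`, `legacy.rs`, `notif_in.rs` and `notif_out.rs` are removed below.
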
- -use crate::protocol::generic_proto::{ - handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn, LegacyProtoHandlerOut}, - handler::notif_in::{NotifsInHandlerProto, NotifsInHandler, NotifsInHandlerIn, NotifsInHandlerOut}, - handler::notif_out::{NotifsOutHandlerProto, NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut}, - upgrade::{NotificationsIn, NotificationsOut, NotificationsHandshakeError, RegisteredProtocol, UpgradeCollec}, -}; - -use bytes::BytesMut; -use libp2p::core::{either::EitherOutput, ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{UpgradeError, SelectUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use futures::{ - channel::mpsc, - lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard}, - prelude::* -}; -use log::{debug, error}; -use parking_lot::{Mutex, RwLock}; -use std::{borrow::Cow, str, sync::Arc, task::{Context, Poll}}; - -/// Number of pending notifications in asynchronous contexts. -/// See [`NotificationsSink::reserve_notification`] for context. -const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; -/// Number of pending notifications in synchronous contexts. -const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; - -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsHandler`]. -/// -/// See the documentation at the module level for more information. -pub struct NotifsHandlerProto { - /// Prototypes for handlers for inbound substreams, and the message we respond with in the - /// handshake. - in_handlers: Vec<(NotifsInHandlerProto, Arc>>)>, - - /// Prototypes for handlers for outbound substreams, and the initial handshake message we send. - out_handlers: Vec<(NotifsOutHandlerProto, Arc>>)>, - - /// Prototype for handler for backwards-compatibility. - legacy: LegacyProtoHandlerProto, -} - -/// The actual handler once the connection has been established. -/// -/// See the documentation at the module level for more information. -pub struct NotifsHandler { - /// Handlers for inbound substreams, and the message we respond with in the handshake. - in_handlers: Vec<(NotifsInHandler, Arc>>)>, - - /// Handlers for outbound substreams, and the initial handshake message we send. - out_handlers: Vec<(NotifsOutHandler, Arc>>)>, - - /// Whether we are the connection dialer or listener. - endpoint: ConnectedPoint, - - /// Handler for backwards-compatibility. - legacy: LegacyProtoHandler, - - /// In the situation where either the legacy substream has been opened or the handshake-bearing - /// notifications protocol is open, but we haven't sent out any [`NotifsHandlerOut::Open`] - /// event yet, this contains the received handshake waiting to be reported through the - /// external API. - pending_handshake: Option>, - - /// State of this handler. - enabled: EnabledState, - - /// If we receive inbound substream requests while in initialization mode, - /// we push the corresponding index here and process them when the handler - /// gets enabled/disabled. - pending_in: Vec, - - /// If `Some`, contains the two `Receiver`s connected to the [`NotificationsSink`] that has - /// been sent out. 
The notifications to send out can be pulled from this receivers. - /// We use two different channels in order to have two different channel sizes, but from the - /// receiving point of view, the two channels are the same. - /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. - /// - /// Contains `Some` if and only if it has been reported to the user that the substreams are - /// open. - notifications_sink_rx: Option< - stream::Select< - stream::Fuse>, - stream::Fuse> - > - >, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -enum EnabledState { - Initial, - Enabled, - Disabled, -} - -impl IntoProtocolsHandler for NotifsHandlerProto { - type Handler = NotifsHandler; - - fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { - let in_handlers = self.in_handlers.iter() - .map(|(h, _)| h.inbound_protocol()) - .collect::>(); - - SelectUpgrade::new(in_handlers, self.legacy.inbound_protocol()) - } - - fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { - NotifsHandler { - in_handlers: self.in_handlers - .into_iter() - .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) - .collect(), - out_handlers: self.out_handlers - .into_iter() - .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) - .collect(), - endpoint: connected_point.clone(), - legacy: self.legacy.into_handler(remote_peer_id, connected_point), - pending_handshake: None, - enabled: EnabledState::Initial, - pending_in: Vec::new(), - notifications_sink_rx: None, - } - } -} - -/// Event that can be received by a `NotifsHandler`. -#[derive(Debug, Clone)] -pub enum NotifsHandlerIn { - /// The node should start using custom protocols. - Enable, - - /// The node should stop using custom protocols. - Disable, -} - -/// Event that can be emitted by a `NotifsHandler`. -#[derive(Debug)] -pub enum NotifsHandlerOut { - /// The connection is open for custom protocols. - Open { - /// The endpoint of the connection that is open for custom protocols. - endpoint: ConnectedPoint, - /// Handshake that was sent to us. - /// This is normally a "Status" message, but this out of the concern of this code. - received_handshake: Vec, - /// How notifications can be sent to this node. - notifications_sink: NotificationsSink, - }, - - /// The connection is closed for custom protocols. - Closed { - /// The reason for closing, for diagnostic purposes. - reason: Cow<'static, str>, - /// The endpoint of the connection that closed for custom protocols. - endpoint: ConnectedPoint, - }, - - /// Received a non-gossiping message on the legacy substream. - CustomMessage { - /// Message that has been received. - /// - /// Keep in mind that this can be a `ConsensusMessage` message, which then contains a - /// notification. - message: BytesMut, - }, - - /// Received a message on a custom protocol substream. - Notification { - /// Name of the protocol of the message. - protocol_name: Cow<'static, str>, - - /// Message that has been received. - message: BytesMut, - }, -} - -/// Sink connected directly to the node background task. Allows sending notifications to the peer. -/// -/// Can be cloned in order to obtain multiple references to the same peer. -#[derive(Debug, Clone)] -pub struct NotificationsSink { - inner: Arc, -} - -#[derive(Debug)] -struct NotificationsSinkInner { - /// Sender to use in asynchronous contexts. Uses an asynchronous mutex. - async_channel: FuturesMutex>, - /// Sender to use in synchronous contexts. 
Uses a synchronous mutex. - /// This channel has a large capacity and is meant to be used in contexts where - /// back-pressure cannot be properly exerted. - /// It will be removed in a future version. - sync_channel: Mutex>, -} - -/// Message emitted through the [`NotificationsSink`] and processed by the background task -/// dedicated to the peer. -#[derive(Debug)] -enum NotificationsSinkMessage { - /// Message emitted by [`NotificationsSink::reserve_notification`] and - /// [`NotificationsSink::write_notification_now`]. - Notification { - protocol_name: Cow<'static, str>, - message: Vec, - }, - - /// Must close the connection. - ForceClose, -} - -impl NotificationsSink { - /// Sends a notification to the peer. - /// - /// If too many messages are already buffered, the notification is silently discarded and the - /// connection to the peer will be closed shortly after. - /// - /// The protocol name is expected to be checked ahead of calling this method. It is a logic - /// error to send a notification using an unknown protocol. - /// - /// This method will be removed in a future version. - pub fn send_sync_notification<'a>( - &'a self, - protocol_name: Cow<'static, str>, - message: impl Into> - ) { - let mut lock = self.inner.sync_channel.lock(); - let result = lock.try_send(NotificationsSinkMessage::Notification { - protocol_name, - message: message.into() - }); - - if result.is_err() { - // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the - // buffer, and therefore that `try_send` will succeed. - let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); - debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); - } - } - - /// Wait until the remote is ready to accept a notification. - /// - /// Returns an error in the case where the connection is closed. - /// - /// The protocol name is expected to be checked ahead of calling this method. It is a logic - /// error to send a notification using an unknown protocol. - pub async fn reserve_notification<'a>(&'a self, protocol_name: Cow<'static, str>) -> Result, ()> { - let mut lock = self.inner.async_channel.lock().await; - - let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await; - if poll_ready.is_ok() { - Ok(Ready { protocol_name: protocol_name, lock }) - } else { - Err(()) - } - } -} - -/// Notification slot is reserved and the notification can actually be sent. -#[must_use] -#[derive(Debug)] -pub struct Ready<'a> { - /// Guarded channel. The channel inside is guaranteed to not be full. - lock: FuturesMutexGuard<'a, mpsc::Sender>, - /// Name of the protocol. Should match one of the protocols passed at initialization. - protocol_name: Cow<'static, str>, -} - -impl<'a> Ready<'a> { - /// Consumes this slots reservation and actually queues the notification. - /// - /// Returns an error if the substream has been closed. - pub fn send( - mut self, - notification: impl Into> - ) -> Result<(), ()> { - self.lock.start_send(NotificationsSinkMessage::Notification { - protocol_name: self.protocol_name, - message: notification.into(), - }).map_err(|_| ()) - } -} - -/// Error specific to the collection of protocols. -#[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum NotifsHandlerError { - /// Channel of synchronous notifications is full. - SyncNotificationsClogged, - /// Error in legacy protocol. - Legacy(::Error), -} - -impl NotifsHandlerProto { - /// Builds a new handler. 
- /// - /// `list` is a list of notification protocols names, and the message to send as part of the - /// handshake. At the moment, the message is always the same whether we open a substream - /// ourselves or respond to handshake from the remote. - /// - /// The first protocol in `list` is special-cased as the protocol that contains the handshake - /// to report through the [`NotifsHandlerOut::Open`] event. - /// - /// # Panic - /// - /// - Panics if `list` is empty. - /// - pub fn new( - legacy: RegisteredProtocol, - list: impl Into, Arc>>)>>, - ) -> Self { - let list = list.into(); - assert!(!list.is_empty()); - - let out_handlers = list - .clone() - .into_iter() - .map(|(proto_name, initial_message)| { - (NotifsOutHandlerProto::new(proto_name), initial_message) - }).collect(); - - let in_handlers = list.clone() - .into_iter() - .map(|(proto_name, msg)| (NotifsInHandlerProto::new(proto_name), msg)) - .collect(); - - NotifsHandlerProto { - in_handlers, - out_handlers, - legacy: LegacyProtoHandlerProto::new(legacy), - } - } -} - -impl ProtocolsHandler for NotifsHandler { - type InEvent = NotifsHandlerIn; - type OutEvent = NotifsHandlerOut; - type Error = NotifsHandlerError; - type InboundProtocol = SelectUpgrade, RegisteredProtocol>; - type OutboundProtocol = NotificationsOut; - // Index within the `out_handlers` - type OutboundOpenInfo = usize; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - let in_handlers = self.in_handlers.iter() - .map(|(h, _)| h.listen_protocol().into_upgrade().1) - .collect::>(); - - let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1); - SubstreamProtocol::new(proto, ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - out: >::Output, - (): () - ) { - match out { - EitherOutput::First((out, num)) => - self.in_handlers[num].0.inject_fully_negotiated_inbound(out, ()), - EitherOutput::Second(out) => - self.legacy.inject_fully_negotiated_inbound(out, ()), - } - } - - fn inject_fully_negotiated_outbound( - &mut self, - out: >::Output, - num: Self::OutboundOpenInfo - ) { - self.out_handlers[num].0.inject_fully_negotiated_outbound(out, ()) - } - - fn inject_event(&mut self, message: NotifsHandlerIn) { - match message { - NotifsHandlerIn::Enable => { - if let EnabledState::Enabled = self.enabled { - debug!("enabling already-enabled handler"); - } - self.enabled = EnabledState::Enabled; - self.legacy.inject_event(LegacyProtoHandlerIn::Enable); - for (handler, initial_message) in &mut self.out_handlers { - // We create `initial_message` on a separate line to be sure that the lock - // is released as soon as possible. - let initial_message = initial_message.read().clone(); - handler.inject_event(NotifsOutHandlerIn::Enable { - initial_message, - }); - } - for num in self.pending_in.drain(..) { - // We create `handshake_message` on a separate line to be sure - // that the lock is released as soon as possible. - let handshake_message = self.in_handlers[num].1.read().clone(); - self.in_handlers[num].0 - .inject_event(NotifsInHandlerIn::Accept(handshake_message)); - } - }, - NotifsHandlerIn::Disable => { - if let EnabledState::Disabled = self.enabled { - debug!("disabling already-disabled handler"); - } - self.legacy.inject_event(LegacyProtoHandlerIn::Disable); - // The notifications protocols start in the disabled state. If we were in the - // "Initial" state, then we shouldn't disable the notifications protocols again. 
- if self.enabled != EnabledState::Initial { - for (handler, _) in &mut self.out_handlers { - handler.inject_event(NotifsOutHandlerIn::Disable); - } - } - self.enabled = EnabledState::Disabled; - for num in self.pending_in.drain(..) { - self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Refuse); - } - }, - } - } - - fn inject_dial_upgrade_error( - &mut self, - num: usize, - err: ProtocolsHandlerUpgrErr - ) { - match err { - ProtocolsHandlerUpgrErr::Timeout => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Timeout - ), - ProtocolsHandlerUpgrErr::Timer => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Timer - ), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) - ), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) - ), - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - // Iterate over each handler and return the maximum value. - - let mut ret = self.legacy.connection_keep_alive(); - if ret.is_yes() { - return KeepAlive::Yes; - } - - for (handler, _) in &self.in_handlers { - let val = handler.connection_keep_alive(); - if val.is_yes() { - return KeepAlive::Yes; - } - if ret < val { ret = val; } - } - - for (handler, _) in &self.out_handlers { - let val = handler.connection_keep_alive(); - if val.is_yes() { - return KeepAlive::Yes; - } - if ret < val { ret = val; } - } - - ret - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - if let Some(notifications_sink_rx) = &mut self.notifications_sink_rx { - 'poll_notifs_sink: loop { - // Before we poll the notifications sink receiver, check that all the notification - // channels are ready to send a message. - // TODO: it is planned that in the future we switch to one `NotificationsSink` per - // protocol, in which case each sink should wait only for its corresponding handler - // to be ready, and not all handlers - // see https://github.com/paritytech/substrate/issues/5670 - for (out_handler, _) in &mut self.out_handlers { - match out_handler.poll_ready(cx) { - Poll::Ready(_) => {}, - Poll::Pending => break 'poll_notifs_sink, - } - } - - let message = match notifications_sink_rx.poll_next_unpin(cx) { - Poll::Ready(Some(msg)) => msg, - Poll::Ready(None) | Poll::Pending => break, - }; - - match message { - NotificationsSinkMessage::Notification { - protocol_name, - message - } => { - let mut found_any_with_name = false; - - for (handler, _) in &mut self.out_handlers { - if *handler.protocol_name() == protocol_name { - found_any_with_name = true; - if handler.is_open() { - handler.send_or_discard(message); - continue 'poll_notifs_sink; - } - } - } - - // This code can be reached via the following scenarios: - // - // - User tried to send a notification on a non-existing protocol. This - // most likely relates to https://github.com/paritytech/substrate/issues/6827 - // - User tried to send a notification to a peer we're not or no longer - // connected to. This happens in a normal scenario due to the racy nature - // of connections and disconnections, and is benign. - // - // We print a warning in the former condition. 
- if !found_any_with_name { - log::warn!( - target: "sub-libp2p", - "Tried to send a notification on non-registered protocol: {:?}", - protocol_name - ); - } - } - NotificationsSinkMessage::ForceClose => { - return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged)); - } - } - } - } - - // If `self.pending_handshake` is `Some`, we are in a state where the handshake-bearing - // substream (either the legacy substream or the one special-cased as providing the - // handshake) is open but the user isn't aware yet of the substreams being open. - // When that is the case, neither the legacy substream nor the incoming notifications - // substreams should be polled, otherwise there is a risk of receiving messages from them. - if self.pending_handshake.is_none() { - while let Poll::Ready(ev) = self.legacy.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, .. } => - match *protocol.info() {}, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { - received_handshake, - .. - }) => { - if self.notifications_sink_rx.is_none() { - debug_assert!(self.pending_handshake.is_none()); - self.pending_handshake = Some(received_handshake); - } - cx.waker().wake_by_ref(); - return Poll::Pending; - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { reason, .. }) => { - // We consciously drop the receivers despite notifications being potentially - // still buffered up. - self.notifications_sink_rx = None; - - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Closed { endpoint: self.endpoint.clone(), reason } - )) - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => { - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CustomMessage { message } - )) - }, - ProtocolsHandlerEvent::Close(err) => - return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))), - } - } - } - - for (handler_num, (handler, handshake_message)) in self.in_handlers.iter_mut().enumerate() { - loop { - let poll = if self.notifications_sink_rx.is_some() { - handler.poll(cx) - } else { - handler.poll_process(cx) - }; - - let ev = match poll { - Poll::Ready(e) => e, - Poll::Pending => break, - }; - - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => - error!("Incoming substream handler tried to open a substream"), - ProtocolsHandlerEvent::Close(err) => void::unreachable(err), - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => - match self.enabled { - EnabledState::Initial => self.pending_in.push(handler_num), - EnabledState::Enabled => { - // We create `handshake_message` on a separate line to be sure - // that the lock is released as soon as possible. 
- let handshake_message = handshake_message.read().clone(); - handler.inject_event(NotifsInHandlerIn::Accept(handshake_message)) - }, - EnabledState::Disabled => - handler.inject_event(NotifsInHandlerIn::Refuse), - }, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {}, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { - debug_assert!(self.pending_handshake.is_none()); - if self.notifications_sink_rx.is_some() { - let msg = NotifsHandlerOut::Notification { - message, - protocol_name: handler.protocol_name().clone(), - }; - return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); - } - }, - } - } - } - - for (handler_num, (handler, _)) in self.out_handlers.iter_mut().enumerate() { - while let Poll::Ready(ev) = handler.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol } => - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol - .map_info(|()| handler_num), - }), - ProtocolsHandlerEvent::Close(err) => void::unreachable(err), - - // Opened substream on the handshake-bearing notification protocol. - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake }) - if handler_num == 0 => - { - if self.notifications_sink_rx.is_none() && self.pending_handshake.is_none() { - self.pending_handshake = Some(handshake); - } - }, - - // Nothing to do in response to other notification substreams being opened - // or closed. - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }) => {}, - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed) => {}, - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused) => {}, - } - } - } - - if self.out_handlers.iter().all(|(h, _)| h.is_open() || h.is_refused()) { - if let Some(handshake) = self.pending_handshake.take() { - let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); - let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); - let notifications_sink = NotificationsSink { - inner: Arc::new(NotificationsSinkInner { - async_channel: FuturesMutex::new(async_tx), - sync_channel: Mutex::new(sync_tx), - }), - }; - - debug_assert!(self.notifications_sink_rx.is_none()); - self.notifications_sink_rx = Some(stream::select(async_rx.fuse(), sync_rx.fuse())); - - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Open { - endpoint: self.endpoint.clone(), - received_handshake: handshake, - notifications_sink - } - )) - } - } - - Poll::Pending - } -} diff --git a/client/network/src/protocol/generic_proto/handler/legacy.rs b/client/network/src/protocol/generic_proto/handler/legacy.rs deleted file mode 100644 index 404093553785c..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/legacy.rs +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
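
The `NotificationsSink` removed with `group.rs` above (a close equivalent survives in the new `handler.rs`) pairs a small, back-pressured channel for asynchronous sends (`ASYNC_NOTIFICATIONS_BUFFER_SIZE = 8`) with a much larger channel for synchronous sends (`SYNC_NOTIFICATIONS_BUFFER_SIZE = 2048`), and the handler drains both through a single `stream::select`. A minimal sketch of that two-channel pattern, with illustrative names and buffer sizes rather than the actual Substrate API:

```rust
use futures::{channel::mpsc, executor::block_on, stream, StreamExt};

/// Messages flowing from the sink half to the handler half, as in the deleted code.
#[derive(Debug)]
enum SinkMessage {
    Notification(Vec<u8>),
    /// Emitted when the large "sync" channel overflows, asking to close the connection.
    ForceClose,
}

fn main() {
    // Small buffer: asynchronous callers await readiness, so back-pressure applies.
    let (mut async_tx, async_rx) = mpsc::channel::<SinkMessage>(8);
    // Large buffer: synchronous callers cannot wait; overflow triggers `ForceClose`.
    let (mut sync_tx, sync_rx) = mpsc::channel::<SinkMessage>(2048);

    // The handler side consumes a single merged stream fed by both channels.
    let mut merged = stream::select(async_rx, sync_rx);

    async_tx
        .try_send(SinkMessage::Notification(b"sent with back-pressure".to_vec()))
        .unwrap();
    if sync_tx
        .try_send(SinkMessage::Notification(b"sent without waiting".to_vec()))
        .is_err()
    {
        // In the real sink, a failed `try_send` is followed by a `ForceClose`.
        let _ = sync_tx.try_send(SinkMessage::ForceClose);
    }

    block_on(async {
        // Both senders feed the same merged stream on the handler side.
        let first = merged.next().await;
        let second = merged.next().await;
        println!("handler received: {:?} and {:?}", first, second);
    });
}
```

In the deleted code the two senders sit behind an async mutex and a `parking_lot::Mutex` respectively, so the sink can be cloned and shared across tasks, while the merged receiver is what `notifications_sink_rx` polls from inside `poll()`.
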
- -use crate::protocol::generic_proto::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream}; -use bytes::BytesMut; -use futures::prelude::*; -use futures_timer::Delay; -use libp2p::core::{ConnectedPoint, PeerId, Endpoint}; -use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use log::{debug, error}; -use smallvec::{smallvec, SmallVec}; -use std::{borrow::Cow, collections::VecDeque, convert::Infallible, error, fmt, io, mem}; -use std::{pin::Pin, task::{Context, Poll}, time::Duration}; - -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a `LegacyProtoHandler`. It then handles all communications that are specific -/// to Substrate on that single connection. -/// -/// Note that there can be multiple instance of this struct simultaneously for same peer, -/// if there are multiple established connections to the peer. -/// -/// ## State of the handler -/// -/// There are six possible states for the handler: -/// -/// - Enabled and open, which is a normal operation. -/// - Enabled and closed, in which case it will try to open substreams. -/// - Disabled and open, in which case it will try to close substreams. -/// - Disabled and closed, in which case the handler is idle. The connection will be -/// garbage-collected after a few seconds if nothing more happens. -/// - Initializing and open. -/// - Initializing and closed, which is the state the handler starts in. -/// -/// The Init/Enabled/Disabled state is entirely controlled by the user by sending `Enable` or -/// `Disable` messages to the handler. The handler itself never transitions automatically between -/// these states. For example, if the handler reports a network misbehaviour, it will close the -/// substreams but it is the role of the user to send a `Disabled` event if it wants the connection -/// to close. Otherwise, the handler will try to reopen substreams. -/// -/// The handler starts in the "Initializing" state and must be transitionned to Enabled or Disabled -/// as soon as possible. -/// -/// The Open/Closed state is decided by the handler and is reported with the `CustomProtocolOpen` -/// and `CustomProtocolClosed` events. The `CustomMessage` event can only be generated if the -/// handler is open. -/// -/// ## How it works -/// -/// When the handler is created, it is initially in the `Init` state and waits for either a -/// `Disable` or an `Enable` message from the outer layer. At any time, the outer layer is free to -/// toggle the handler between the disabled and enabled states. -/// -/// When the handler switches to "enabled", it opens a substream and negotiates the protocol named -/// `/substrate/xxx`, where `xxx` is chosen by the user and depends on the chain. -/// -/// For backwards compatibility reasons, when we switch to "enabled" for the first time (while we -/// are still in "init" mode) and we are the connection listener, we don't open a substream. -/// -/// In order the handle the situation where both the remote and us get enabled at the same time, -/// we tolerate multiple substreams open at the same time. Messages are transmitted on an arbitrary -/// substream. 
The endpoints don't try to agree on a single substream. -/// -/// We consider that we are now "closed" if the remote closes all the existing substreams. -/// Re-opening it can then be performed by closing all active substream and re-opening one. -/// -pub struct LegacyProtoHandlerProto { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, -} - -impl LegacyProtoHandlerProto { - /// Builds a new `LegacyProtoHandlerProto`. - pub fn new(protocol: RegisteredProtocol) -> Self { - LegacyProtoHandlerProto { - protocol, - } - } -} - -impl IntoProtocolsHandler for LegacyProtoHandlerProto { - type Handler = LegacyProtoHandler; - - fn inbound_protocol(&self) -> RegisteredProtocol { - self.protocol.clone() - } - - fn into_handler(self, remote_peer_id: &PeerId, _: &ConnectedPoint) -> Self::Handler { - LegacyProtoHandler { - protocol: self.protocol, - remote_peer_id: remote_peer_id.clone(), - state: ProtocolState::Init { - substreams: SmallVec::new(), - init_deadline: Delay::new(Duration::from_secs(20)) - }, - events_queue: VecDeque::new(), - } - } -} - -/// The actual handler once the connection has been established. -pub struct LegacyProtoHandler { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, - - /// State of the communications with the remote. - state: ProtocolState, - - /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have - /// any influence on the behaviour. - remote_peer_id: PeerId, - - /// Queue of events to send to the outside. - /// - /// This queue must only ever be modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque< - ProtocolsHandlerEvent - >, -} - -/// State of the handler. -enum ProtocolState { - /// Waiting for the behaviour to tell the handler whether it is enabled or disabled. - Init { - /// List of substreams opened by the remote but that haven't been processed yet. - /// For each substream, also includes the handshake message that we have received. - substreams: SmallVec<[(RegisteredProtocolSubstream, Vec); 6]>, - /// Deadline after which the initialization is abnormally long. - init_deadline: Delay, - }, - - /// Handler is ready to accept incoming substreams. - /// If we are in this state, we haven't sent any `CustomProtocolOpen` yet. - Opening, - - /// Normal operating mode. Contains the substreams that are open. - /// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside. - Normal { - /// The substreams where bidirectional communications happen. - substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, - /// Contains substreams which are being shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, - }, - - /// We are disabled. Contains substreams that are being closed. - /// If we are in this state, either we have sent a `CustomProtocolClosed` message to the - /// outside or we have never sent any `CustomProtocolOpen` in the first place. - Disabled { - /// List of substreams to shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 6]>, - - /// If true, we should reactivate the handler after all the substreams in `shutdown` have - /// been closed. - /// - /// Since we don't want to mix old and new substreams, we wait for all old substreams to - /// be closed before opening any new one. - reenable: bool, - }, - - /// In this state, we don't care about anything anymore and need to kill the connection as soon - /// as possible. 
- KillAsap, - - /// We sometimes temporarily switch to this state during processing. If we are in this state - /// at the beginning of a method, that means something bad happened in the source code. - Poisoned, -} - -/// Event that can be received by a `LegacyProtoHandler`. -#[derive(Debug)] -pub enum LegacyProtoHandlerIn { - /// The node should start using custom protocols. - Enable, - - /// The node should stop using custom protocols. - Disable, -} - -/// Event that can be emitted by a `LegacyProtoHandler`. -#[derive(Debug)] -pub enum LegacyProtoHandlerOut { - /// Opened a custom protocol with the remote. - CustomProtocolOpen { - /// Version of the protocol that has been opened. - version: u8, - /// Handshake message that has been sent to us. - /// This is normally a "Status" message, but this out of the concern of this code. - received_handshake: Vec, - }, - - /// Closed a custom protocol with the remote. - CustomProtocolClosed { - /// Reason why the substream closed, for diagnostic purposes. - reason: Cow<'static, str>, - }, - - /// Receives a message on a custom protocol substream. - CustomMessage { - /// Message that has been received. - message: BytesMut, - }, -} - -impl LegacyProtoHandler { - /// Enables the handler. - fn enable(&mut self) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: mut incoming, .. } => { - if incoming.is_empty() { - ProtocolState::Opening - } else { - let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: incoming[0].0.protocol_version(), - received_handshake: mem::replace(&mut incoming[0].1, Vec::new()), - }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Normal { - substreams: incoming.into_iter().map(|(s, _)| s).collect(), - shutdown: SmallVec::new() - } - } - } - - st @ ProtocolState::KillAsap => st, - st @ ProtocolState::Opening { .. } => st, - st @ ProtocolState::Normal { .. } => st, - ProtocolState::Disabled { shutdown, .. } => { - ProtocolState::Disabled { shutdown, reenable: true } - } - } - } - - /// Disables the handler. - fn disable(&mut self) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: shutdown, .. } => { - let mut shutdown = shutdown.into_iter().map(|(s, _)| s).collect::>(); - for s in &mut shutdown { - s.shutdown(); - } - ProtocolState::Disabled { shutdown, reenable: false } - } - - ProtocolState::Opening { .. } | ProtocolState::Normal { .. } => - // At the moment, if we get disabled while things were working, we kill the entire - // connection in order to force a reset of the state. - // This is obviously an extremely shameful way to do things, but at the time of - // the writing of this comment, the networking works very poorly and a solution - // needs to be found. - ProtocolState::KillAsap, - - ProtocolState::Disabled { shutdown, .. } => - ProtocolState::Disabled { shutdown, reenable: false }, - - ProtocolState::KillAsap => ProtocolState::KillAsap, - }; - } - - /// Polls the state for events. Optionally returns an event to produce. 
- #[must_use] - fn poll_state(&mut self, cx: &mut Context) - -> Option> { - match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - self.state = ProtocolState::Poisoned; - None - } - - ProtocolState::Init { substreams, mut init_deadline } => { - match Pin::new(&mut init_deadline).poll(cx) { - Poll::Ready(()) => { - error!(target: "sub-libp2p", "Handler initialization process is too long \ - with {:?}", self.remote_peer_id); - self.state = ProtocolState::KillAsap; - }, - Poll::Pending => { - self.state = ProtocolState::Init { substreams, init_deadline }; - } - } - - None - } - - ProtocolState::Opening => { - self.state = ProtocolState::Opening; - None - } - - ProtocolState::Normal { mut substreams, mut shutdown } => { - for n in (0..substreams.len()).rev() { - let mut substream = substreams.swap_remove(n); - match Pin::new(&mut substream).poll_next(cx) { - Poll::Pending => substreams.push(substream), - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { - let event = LegacyProtoHandlerOut::CustomMessage { - message - }; - substreams.push(substream); - self.state = ProtocolState::Normal { substreams, shutdown }; - return Some(ProtocolsHandlerEvent::Custom(event)); - }, - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { - shutdown.push(substream); - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: "Legacy substream clogged".into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - } - Poll::Ready(None) => { - shutdown.push(substream); - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: "All substreams have been closed by the remote".into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - } - Poll::Ready(Some(Err(err))) => { - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: format!("Error on the last substream: {:?}", err).into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } else { - debug!(target: "sub-libp2p", "Error on extra substream: {:?}", err); - } - } - } - } - - // This code is reached is none if and only if none of the substreams are in a ready state. - self.state = ProtocolState::Normal { substreams, shutdown }; - None - } - - ProtocolState::Disabled { mut shutdown, reenable } => { - shutdown_list(&mut shutdown, cx); - // If `reenable` is `true`, that means we should open the substreams system again - // after all the substreams are closed. 
- if reenable && shutdown.is_empty() { - self.state = ProtocolState::Opening; - } else { - self.state = ProtocolState::Disabled { shutdown, reenable }; - } - None - } - - ProtocolState::KillAsap => None, - } - } -} - -impl ProtocolsHandler for LegacyProtoHandler { - type InEvent = LegacyProtoHandlerIn; - type OutEvent = LegacyProtoHandlerOut; - type Error = ConnectionKillError; - type InboundProtocol = RegisteredProtocol; - type OutboundProtocol = RegisteredProtocol; - type OutboundOpenInfo = Infallible; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.protocol.clone(), ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - (mut substream, received_handshake): >::Output, - (): () - ) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { mut substreams, init_deadline } => { - if substream.endpoint() == Endpoint::Dialer { - error!(target: "sub-libp2p", "Opened dialing substream with {:?} before \ - initialization", self.remote_peer_id); - } - substreams.push((substream, received_handshake)); - ProtocolState::Init { substreams, init_deadline } - } - - ProtocolState::Opening { .. } => { - let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: substream.protocol_version(), - received_handshake, - }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Normal { - substreams: smallvec![substream], - shutdown: SmallVec::new() - } - } - - ProtocolState::Normal { substreams: mut existing, shutdown } => { - existing.push(substream); - ProtocolState::Normal { substreams: existing, shutdown } - } - - ProtocolState::Disabled { mut shutdown, .. } => { - substream.shutdown(); - shutdown.push(substream); - ProtocolState::Disabled { shutdown, reenable: false } - } - - ProtocolState::KillAsap => ProtocolState::KillAsap, - }; - } - - fn inject_fully_negotiated_outbound( - &mut self, - _: >::Output, - unreachable: Self::OutboundOpenInfo - ) { - match unreachable {} - } - - fn inject_event(&mut self, message: LegacyProtoHandlerIn) { - match message { - LegacyProtoHandlerIn::Disable => self.disable(), - LegacyProtoHandlerIn::Enable => self.enable(), - } - } - - fn inject_dial_upgrade_error( - &mut self, - unreachable: Self::OutboundOpenInfo, - _: ProtocolsHandlerUpgrErr - ) { - match unreachable {} - } - - fn connection_keep_alive(&self) -> KeepAlive { - match self.state { - ProtocolState::Init { .. } | ProtocolState::Normal { .. } => KeepAlive::Yes, - ProtocolState::Opening { .. } | ProtocolState::Disabled { .. } | - ProtocolState::Poisoned | ProtocolState::KillAsap => KeepAlive::No, - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - // Flush the events queue if necessary. - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - // Kill the connection if needed. - if let ProtocolState::KillAsap = self.state { - return Poll::Ready(ProtocolsHandlerEvent::Close(ConnectionKillError)); - } - - // Process all the substreams. 
- if let Some(event) = self.poll_state(cx) { - return Poll::Ready(event) - } - - Poll::Pending - } -} - -impl fmt::Debug for LegacyProtoHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("LegacyProtoHandler") - .finish() - } -} - -/// Given a list of substreams, tries to shut them down. The substreams that have been successfully -/// shut down are removed from the list. -fn shutdown_list - (list: &mut SmallVec>>, - cx: &mut Context) -{ - 'outer: for n in (0..list.len()).rev() { - let mut substream = list.swap_remove(n); - loop { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(_))) => {} - Poll::Pending => break, - Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, - } - } - list.push(substream); - } -} - -/// Error returned when switching from normal to disabled. -#[derive(Debug)] -pub struct ConnectionKillError; - -impl error::Error for ConnectionKillError { -} - -impl fmt::Display for ConnectionKillError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Connection kill when switching from normal to disabled") - } -} diff --git a/client/network/src/protocol/generic_proto/handler/notif_in.rs b/client/network/src/protocol/generic_proto/handler/notif_in.rs deleted file mode 100644 index d3b505e0de3e2..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/notif_in.rs +++ /dev/null @@ -1,293 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for ingoing -//! substreams for a single gossiping protocol. -//! -//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple -//! > protocols, you need to create multiple instances and group them. -//! - -use crate::protocol::generic_proto::upgrade::{NotificationsIn, NotificationsInSubstream}; -use bytes::BytesMut; -use futures::prelude::*; -use libp2p::core::{ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use log::{error, warn}; -use std::{borrow::Cow, collections::VecDeque, fmt, pin::Pin, task::{Context, Poll}}; - -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsInHandler`]. -pub struct NotifsInHandlerProto { - /// Configuration for the protocol upgrade to negotiate. 
- in_protocol: NotificationsIn, -} - -/// The actual handler once the connection has been established. -pub struct NotifsInHandler { - /// Configuration for the protocol upgrade to negotiate for inbound substreams. - in_protocol: NotificationsIn, - - /// Substream that is open with the remote. - substream: Option>, - - /// If the substream is opened and closed rapidly, we can emit several `OpenRequest` and - /// `Closed` messages in a row without the handler having time to respond with `Accept` or - /// `Refuse`. - /// - /// In order to keep the state consistent, we increment this variable every time an - /// `OpenRequest` is emitted and decrement it every time an `Accept` or `Refuse` is received. - pending_accept_refuses: usize, - - /// Queue of events to send to the outside. - /// - /// This queue is only ever modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque>, -} - -/// Event that can be received by a `NotifsInHandler`. -#[derive(Debug, Clone)] -pub enum NotifsInHandlerIn { - /// Can be sent back as a response to an `OpenRequest`. Contains the status message to send - /// to the remote. - /// - /// After sending this to the handler, the substream is now considered open and `Notif` events - /// can be received. - Accept(Vec), - - /// Can be sent back as a response to an `OpenRequest`. - Refuse, -} - -/// Event that can be emitted by a `NotifsInHandler`. -#[derive(Debug)] -pub enum NotifsInHandlerOut { - /// The remote wants to open a substream. Contains the initial message sent by the remote - /// when the substream has been opened. - /// - /// Every time this event is emitted, a corresponding `Accepted` or `Refused` **must** be sent - /// back even if a `Closed` is received. - OpenRequest(Vec), - - /// The notifications substream has been closed by the remote. In order to avoid race - /// conditions, this does **not** cancel any previously-sent `OpenRequest`. - Closed, - - /// Received a message on the notifications substream. - /// - /// Can only happen after an `Accept` and before a `Closed`. - Notif(BytesMut), -} - -impl NotifsInHandlerProto { - /// Builds a new `NotifsInHandlerProto`. - pub fn new( - protocol_name: impl Into> - ) -> Self { - NotifsInHandlerProto { - in_protocol: NotificationsIn::new(protocol_name), - } - } -} - -impl IntoProtocolsHandler for NotifsInHandlerProto { - type Handler = NotifsInHandler; - - fn inbound_protocol(&self) -> NotificationsIn { - self.in_protocol.clone() - } - - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { - NotifsInHandler { - in_protocol: self.in_protocol, - substream: None, - pending_accept_refuses: 0, - events_queue: VecDeque::new(), - } - } -} - -impl NotifsInHandler { - /// Returns the name of the protocol that we accept. - pub fn protocol_name(&self) -> &Cow<'static, str> { - self.in_protocol.protocol_name() - } - - /// Equivalent to the `poll` method of `ProtocolsHandler`, except that it is guaranteed to - /// never generate [`NotifsInHandlerOut::Notif`]. - /// - /// Use this method in situations where it is not desirable to receive events but still - /// necessary to drive any potential incoming handshake or request. 
- pub fn poll_process( - &mut self, - cx: &mut Context - ) -> Poll< - ProtocolsHandlerEvent - > { - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match self.substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) { - None | Some(Poll::Pending) => {}, - Some(Poll::Ready(Ok(v))) => match v {}, - Some(Poll::Ready(Err(_))) => { - self.substream = None; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - }, - } - - Poll::Pending - } -} - -impl ProtocolsHandler for NotifsInHandler { - type InEvent = NotifsInHandlerIn; - type OutEvent = NotifsInHandlerOut; - type Error = void::Void; - type InboundProtocol = NotificationsIn; - type OutboundProtocol = DeniedUpgrade; - type OutboundOpenInfo = (); - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.in_protocol.clone(), ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - (msg, proto): >::Output, - (): () - ) { - // If a substream already exists, we drop it and replace it with the new incoming one. - if self.substream.is_some() { - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - } - - // Note that we drop the existing substream, which will send an equivalent to a TCP "RST" - // to the remote and force-close the substream. It might seem like an unclean way to get - // rid of a substream. However, keep in mind that it is invalid for the remote to open - // multiple such substreams, and therefore sending a "RST" is not an incorrect thing to do. - self.substream = Some(proto); - - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(msg))); - self.pending_accept_refuses = self.pending_accept_refuses - .checked_add(1) - .unwrap_or_else(|| { - error!(target: "sub-libp2p", "Overflow in pending_accept_refuses"); - usize::max_value() - }); - } - - fn inject_fully_negotiated_outbound( - &mut self, - out: >::Output, - _: Self::OutboundOpenInfo - ) { - // We never emit any outgoing substream. - void::unreachable(out) - } - - fn inject_event(&mut self, message: NotifsInHandlerIn) { - self.pending_accept_refuses = match self.pending_accept_refuses.checked_sub(1) { - Some(v) => v, - None => { - error!( - target: "sub-libp2p", - "Inconsistent state: received Accept/Refuse when no pending request exists" - ); - return; - } - }; - - // If we send multiple `OpenRequest`s in a row, we will receive back multiple - // `Accept`/`Refuse` messages. All of them are obsolete except the last one. - if self.pending_accept_refuses != 0 { - return; - } - - match (message, self.substream.as_mut()) { - (NotifsInHandlerIn::Accept(message), Some(sub)) => sub.send_handshake(message), - (NotifsInHandlerIn::Accept(_), None) => {}, - (NotifsInHandlerIn::Refuse, _) => self.substream = None, - } - } - - fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { - error!(target: "sub-libp2p", "Received dial upgrade error in inbound-only handler"); - } - - fn connection_keep_alive(&self) -> KeepAlive { - if self.substream.is_some() { - KeepAlive::Yes - } else { - KeepAlive::No - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - // Flush the events queue if necessary. 
- if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match self.substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { - None | Some(Poll::Pending) => {}, - Some(Poll::Ready(Some(Ok(msg)))) => { - if self.pending_accept_refuses != 0 { - warn!( - target: "sub-libp2p", - "Bad state in inbound-only handler: notif before accepting substream" - ); - } - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg))) - }, - Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => { - self.substream = None; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - }, - } - - Poll::Pending - } -} - -impl fmt::Debug for NotifsInHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("NotifsInHandler") - .field("substream_open", &self.substream.is_some()) - .finish() - } -} diff --git a/client/network/src/protocol/generic_proto/handler/notif_out.rs b/client/network/src/protocol/generic_proto/handler/notif_out.rs deleted file mode 100644 index 414e62c0d135f..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/notif_out.rs +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for outgoing -//! substreams of a single gossiping protocol. -//! -//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple -//! > protocols, you need to create multiple instances and group them. -//! - -use crate::protocol::generic_proto::upgrade::{NotificationsOut, NotificationsOutSubstream, NotificationsHandshakeError}; -use futures::prelude::*; -use libp2p::core::{ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use log::{debug, warn, error}; -use std::{ - borrow::Cow, collections::VecDeque, fmt, mem, pin::Pin, task::{Context, Poll, Waker}, - time::Duration -}; -use wasm_timer::Instant; - -/// Maximum duration to open a substream and receive the handshake message. After that, we -/// consider that we failed to open the substream. -const OPEN_TIMEOUT: Duration = Duration::from_secs(10); -/// After successfully establishing a connection with the remote, we keep the connection open for -/// at least this amount of time in order to give the rest of the code the chance to notify us to -/// open substreams. -const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); - -/// Implements the `IntoProtocolsHandler` trait of libp2p. 
-/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsOutHandler`]. -/// -/// See the documentation of [`NotifsOutHandler`] for more information. -pub struct NotifsOutHandlerProto { - /// Name of the protocol to negotiate. - protocol_name: Cow<'static, str>, -} - -impl NotifsOutHandlerProto { - /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the - /// notifications substream. - pub fn new(protocol_name: impl Into>) -> Self { - NotifsOutHandlerProto { - protocol_name: protocol_name.into(), - } - } -} - -impl IntoProtocolsHandler for NotifsOutHandlerProto { - type Handler = NotifsOutHandler; - - fn inbound_protocol(&self) -> DeniedUpgrade { - DeniedUpgrade - } - - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { - NotifsOutHandler { - protocol_name: self.protocol_name, - when_connection_open: Instant::now(), - state: State::Disabled, - events_queue: VecDeque::new(), - } - } -} - -/// Handler for an outbound notification substream. -/// -/// When a connection is established, this handler starts in the "disabled" state, meaning that -/// no substream will be open. -/// -/// One can try open a substream by sending an [`NotifsOutHandlerIn::Enable`] message to the -/// handler. Once done, the handler will try to establish then maintain an outbound substream with -/// the remote for the purpose of sending notifications to it. -pub struct NotifsOutHandler { - /// Name of the protocol to negotiate. - protocol_name: Cow<'static, str>, - - /// Relationship with the node we're connected to. - state: State, - - /// When the connection with the remote has been successfully established. - when_connection_open: Instant, - - /// Queue of events to send to the outside. - /// - /// This queue must only ever be modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque>, -} - -/// Our relationship with the node we're connected to. -enum State { - /// The handler is disabled and idle. No substream is open. - Disabled, - - /// The handler is disabled. A substream is still open and needs to be closed. - /// - /// > **Important**: Having this state means that `poll_close` has been called at least once, - /// > but the `Sink` API is unclear about whether or not the stream can then - /// > be recovered. Because of that, we must never switch from the - /// > `DisabledOpen` state to the `Open` state while keeping the same substream. - DisabledOpen(NotificationsOutSubstream), - - /// The handler is disabled but we are still trying to open a substream with the remote. - /// - /// If the handler gets enabled again, we can immediately switch to `Opening`. - DisabledOpening, - - /// The handler is enabled and we are trying to open a substream with the remote. - Opening { - /// The initial message that we sent. Necessary if we need to re-open a substream. - initial_message: Vec, - }, - - /// The handler is enabled. We have tried opening a substream in the past but the remote - /// refused it. - Refused, - - /// The handler is enabled and substream is open. - Open { - /// Substream that is currently open. - substream: NotificationsOutSubstream, - /// Waker for the last task that got `Poll::Pending` from `poll_ready`, to notify - /// when the open substream closes due to being disabled or encountering an - /// error, i.e. 
to notify the task as soon as the substream becomes unavailable, - /// without waiting for an underlying I/O task wakeup. - close_waker: Option, - /// The initial message that we sent. Necessary if we need to re-open a substream. - initial_message: Vec, - }, - - /// Poisoned state. Shouldn't be found in the wild. - Poisoned, -} - -/// Event that can be received by a `NotifsOutHandler`. -#[derive(Debug)] -pub enum NotifsOutHandlerIn { - /// Enables the notifications substream for this node. The handler will try to maintain a - /// substream with the remote. - Enable { - /// Initial message to send to remote nodes when we open substreams. - initial_message: Vec, - }, - - /// Disables the notifications substream for this node. This is the default state. - Disable, -} - -/// Event that can be emitted by a `NotifsOutHandler`. -#[derive(Debug)] -pub enum NotifsOutHandlerOut { - /// The notifications substream has been accepted by the remote. - Open { - /// Handshake message sent by the remote after we opened the substream. - handshake: Vec, - }, - - /// The notifications substream has been closed by the remote. - Closed, - - /// We tried to open a notifications substream, but the remote refused it. - /// - /// Can only happen if we're in a closed state. - Refused, -} - -impl NotifsOutHandler { - /// Returns true if the substream is currently open. - pub fn is_open(&self) -> bool { - match &self.state { - State::Disabled => false, - State::DisabledOpening => false, - State::DisabledOpen(_) => true, - State::Opening { .. } => false, - State::Refused => false, - State::Open { .. } => true, - State::Poisoned => false, - } - } - - /// Returns `true` if there has been an attempt to open the substream, but the remote refused - /// the substream. - /// - /// Always returns `false` if the handler is in a disabled state. - pub fn is_refused(&self) -> bool { - match &self.state { - State::Disabled => false, - State::DisabledOpening => false, - State::DisabledOpen(_) => false, - State::Opening { .. } => false, - State::Refused => true, - State::Open { .. } => false, - State::Poisoned => false, - } - } - - /// Returns the name of the protocol that we negotiate. - pub fn protocol_name(&self) -> &Cow<'static, str> { - &self.protocol_name - } - - /// Polls whether the outbound substream is ready to send a notification. - /// - /// - Returns `Poll::Pending` if the substream is open but not ready to send a notification. - /// - Returns `Poll::Ready(true)` if the substream is ready to send a notification. - /// - Returns `Poll::Ready(false)` if the substream is closed. - /// - pub fn poll_ready(&mut self, cx: &mut Context) -> Poll { - if let State::Open { substream, close_waker, .. } = &mut self.state { - match substream.poll_ready_unpin(cx) { - Poll::Ready(Ok(())) => Poll::Ready(true), - Poll::Ready(Err(_)) => Poll::Ready(false), - Poll::Pending => { - *close_waker = Some(cx.waker().clone()); - Poll::Pending - } - } - } else { - Poll::Ready(false) - } - } - - /// Sends out a notification. - /// - /// If the substream is closed, or not ready to send out a notification yet, then the - /// notification is silently discarded. - /// - /// You are encouraged to call [`NotifsOutHandler::poll_ready`] beforehand to determine - /// whether this will succeed. If `Poll::Ready(true)` is returned, then this method will send - /// out a notification. - pub fn send_or_discard(&mut self, notification: Vec) { - if let State::Open { substream, .. 
} = &mut self.state { - let _ = substream.start_send_unpin(notification); - } - } -} - -impl ProtocolsHandler for NotifsOutHandler { - type InEvent = NotifsOutHandlerIn; - type OutEvent = NotifsOutHandlerOut; - type Error = void::Void; - type InboundProtocol = DeniedUpgrade; - type OutboundProtocol = NotificationsOut; - type OutboundOpenInfo = (); - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(DeniedUpgrade, ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - proto: >::Output, - (): () - ) { - // We should never reach here. `proto` is a `Void`. - void::unreachable(proto) - } - - fn inject_fully_negotiated_outbound( - &mut self, - (handshake_msg, substream): >::Output, - _: () - ) { - match mem::replace(&mut self.state, State::Poisoned) { - State::Opening { initial_message } => { - let ev = NotifsOutHandlerOut::Open { handshake: handshake_msg }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev)); - self.state = State::Open { substream, initial_message, close_waker: None }; - }, - // If the handler was disabled while we were negotiating the protocol, immediately - // close it. - State::DisabledOpening => self.state = State::DisabledOpen(substream), - - // Any other situation should never happen. - State::Disabled | State::Refused | State::Open { .. } | State::DisabledOpen(_) => - error!("☎️ State mismatch in notifications handler: substream already open"), - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - - fn inject_event(&mut self, message: NotifsOutHandlerIn) { - match message { - NotifsOutHandlerIn::Enable { initial_message } => { - match mem::replace(&mut self.state, State::Poisoned) { - State::Disabled => { - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - self.state = State::Opening { initial_message }; - }, - State::DisabledOpening => self.state = State::Opening { initial_message }, - State::DisabledOpen(mut sub) => { - // As documented above, in this state we have already called `poll_close` - // once on the substream, and it is unclear whether the substream can then - // be recovered. When in doubt, let's drop the existing substream and - // open a new one. - if sub.close().now_or_never().is_none() { - warn!( - target: "sub-libp2p", - "📞 Improperly closed outbound notifications substream" - ); - } - - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - self.state = State::Opening { initial_message }; - }, - st @ State::Opening { .. } | st @ State::Refused | st @ State::Open { .. } => { - debug!(target: "sub-libp2p", - "Tried to enable notifications handler that was already enabled"); - self.state = st; - } - State::Poisoned => error!("Notifications handler in a poisoned state"), - } - } - - NotifsOutHandlerIn::Disable => { - match mem::replace(&mut self.state, State::Poisoned) { - st @ State::Disabled | st @ State::DisabledOpen(_) | st @ State::DisabledOpening => { - debug!(target: "sub-libp2p", - "Tried to disable notifications handler that was already disabled"); - self.state = st; - } - State::Opening { .. 
} => self.state = State::DisabledOpening, - State::Refused => self.state = State::Disabled, - State::Open { substream, close_waker, .. } => { - if let Some(close_waker) = close_waker { - close_waker.wake(); - } - self.state = State::DisabledOpen(substream) - }, - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - } - } - - fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { - match mem::replace(&mut self.state, State::Poisoned) { - State::Disabled => {}, - State::DisabledOpen(_) | State::Refused | State::Open { .. } => - error!("☎️ State mismatch in NotificationsOut"), - State::Opening { .. } => { - self.state = State::Refused; - let ev = NotifsOutHandlerOut::Refused; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev)); - }, - State::DisabledOpening => self.state = State::Disabled, - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - match self.state { - // We have a small grace period of `INITIAL_KEEPALIVE_TIME` during which we keep the - // connection open no matter what, in order to avoid closing and reopening - // connections all the time. - State::Disabled | State::DisabledOpen(_) | State::DisabledOpening => - KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), - State::Opening { .. } | State::Open { .. } => KeepAlive::Yes, - State::Refused | State::Poisoned => KeepAlive::No, - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll> { - // Flush the events queue if necessary. - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match &mut self.state { - State::Open { substream, initial_message, close_waker } => - match Sink::poll_flush(Pin::new(substream), cx) { - Poll::Pending | Poll::Ready(Ok(())) => {}, - Poll::Ready(Err(_)) => { - if let Some(close_waker) = close_waker.take() { - close_waker.wake(); - } - - // We try to re-open a substream. - let initial_message = mem::replace(initial_message, Vec::new()); - self.state = State::Opening { initial_message: initial_message.clone() }; - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); - } - }, - - State::DisabledOpen(sub) => match Sink::poll_close(Pin::new(sub), cx) { - Poll::Pending => {}, - Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => { - self.state = State::Disabled; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); - }, - }, - - _ => {} - } - - Poll::Pending - } -} - -impl fmt::Debug for NotifsOutHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("NotifsOutHandler") - .field("open", &self.is_open()) - .finish() - } -} diff --git a/client/network/src/protocol/generic_proto/tests.rs b/client/network/src/protocol/generic_proto/tests.rs index 7a040a403af73..967c0e9f8dfb4 100644 --- a/client/network/src/protocol/generic_proto/tests.rs +++ b/client/network/src/protocol/generic_proto/tests.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
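// A simplified sketch (not part of the patch) of the enable/disable state
// machine implemented by the deleted `NotifsOutHandler` above. Substream
// payloads and event emission are omitted so the transitions can be shown with
// plain `std` types; names mirror the deleted `State` enum, while the
// `on_enable`/`on_disable`/`on_remote_refused` helpers are hypothetical
// (`on_remote_refused` corresponds to `inject_dial_upgrade_error` above).
#[derive(Debug, PartialEq)]
enum SketchState {
    Disabled,
    DisabledOpening,
    DisabledOpen,
    Opening,
    Refused,
    Open,
}

impl SketchState {
    fn on_enable(self) -> Self {
        match self {
            // Enabling from any disabled state (re)starts an opening attempt;
            // the real handler also closes a still-open substream first.
            SketchState::Disabled | SketchState::DisabledOpening | SketchState::DisabledOpen =>
                SketchState::Opening,
            // Already enabled: nothing to do.
            other => other,
        }
    }

    fn on_disable(self) -> Self {
        match self {
            SketchState::Opening => SketchState::DisabledOpening,
            SketchState::Refused => SketchState::Disabled,
            // An open substream still has to be closed before we are fully disabled.
            SketchState::Open => SketchState::DisabledOpen,
            other => other,
        }
    }

    fn on_remote_refused(self) -> Self {
        match self {
            SketchState::Opening => SketchState::Refused,
            SketchState::DisabledOpening => SketchState::Disabled,
            other => other,
        }
    }
}

fn main() {
    let s = SketchState::Disabled.on_enable();
    assert_eq!(s, SketchState::Opening);
    let s = s.on_disable().on_enable();
    assert_eq!(s, SketchState::Opening);
    assert_eq!(SketchState::Opening.on_remote_refused(), SketchState::Refused);
}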
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . #![cfg(test)] @@ -45,7 +47,6 @@ fn build_nodes() -> (Swarm, Swarm) { for index in 0 .. 2 { let keypair = keypairs[index].clone(); - let local_peer_id = keypair.public().into_peer_id(); let noise_keys = noise::Keypair::::new() .into_authentic(&keypair) @@ -54,30 +55,34 @@ fn build_nodes() -> (Swarm, Swarm) { let transport = MemoryTransport .upgrade(upgrade::Version::V1) .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(yamux::Config::default()) + .multiplex(yamux::YamuxConfig::default()) .timeout(Duration::from_secs(20)) .boxed(); let (peerset, _) = sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { - in_peers: 25, - out_peers: 25, - bootnodes: if index == 0 { - keypairs - .iter() - .skip(1) - .map(|keypair| keypair.public().into_peer_id()) - .collect() - } else { - vec![] - }, - reserved_only: false, - priority_groups: Vec::new(), + sets: vec![ + sc_peerset::SetConfig { + in_peers: 25, + out_peers: 25, + bootnodes: if index == 0 { + keypairs + .iter() + .skip(1) + .map(|keypair| keypair.public().into_peer_id()) + .collect() + } else { + vec![] + }, + reserved_nodes: Default::default(), + reserved_only: false, + } + ], }); let behaviour = CustomProtoWithAddr { inner: GenericProto::new( - local_peer_id, "test", &[1], vec![], peerset, - iter::once(("/foo".into(), Vec::new())) + "test", &[1], vec![], peerset, + iter::once(("/foo".into(), Vec::new(), 1024 * 1024)) ), addrs: addrs .iter() @@ -243,7 +248,10 @@ fn reconnect_after_disconnect() { ServiceState::NotConnected => { service1_state = ServiceState::FirstConnec; if service2_state == ServiceState::FirstConnec { - service1.disconnect_peer(Swarm::local_peer_id(&service2)); + service1.disconnect_peer( + Swarm::local_peer_id(&service2), + sc_peerset::SetId::from(0) + ); } }, ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, @@ -262,7 +270,10 @@ fn reconnect_after_disconnect() { ServiceState::NotConnected => { service2_state = ServiceState::FirstConnec; if service1_state == ServiceState::FirstConnec { - service1.disconnect_peer(Swarm::local_peer_id(&service2)); + service1.disconnect_peer( + Swarm::local_peer_id(&service2), + sc_peerset::SetId::from(0) + ); } }, ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, diff --git a/client/network/src/protocol/generic_proto/upgrade.rs b/client/network/src/protocol/generic_proto/upgrade.rs index 6322a10b572a9..6917742d8abb8 100644 --- a/client/network/src/protocol/generic_proto/upgrade.rs +++ b/client/network/src/protocol/generic_proto/upgrade.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. 
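// A minimal sketch of the multi-set peerset configuration that the updated
// test above now uses: peers are grouped into numbered notification sets, and
// peer-related calls such as `disconnect_peer` take an `sc_peerset::SetId`
// (e.g. `SetId::from(0)` in the test). This assumes the `sc_peerset` API
// visible in this patch; `single_set_peerset` is a hypothetical helper used
// only for this sketch.
use sc_peerset::{Peerset, PeersetConfig, PeersetHandle, SetConfig};

fn single_set_peerset() -> (Peerset, PeersetHandle) {
    Peerset::from_config(PeersetConfig {
        sets: vec![SetConfig {
            in_peers: 25,
            out_peers: 25,
            bootnodes: Vec::new(),
            reserved_nodes: Default::default(),
            reserved_only: false,
        }],
    })
}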
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/network/src/protocol/generic_proto/upgrade/collec.rs b/client/network/src/protocol/generic_proto/upgrade/collec.rs index f8d199974940f..8531fb8bdfdbf 100644 --- a/client/network/src/protocol/generic_proto/upgrade/collec.rs +++ b/client/network/src/protocol/generic_proto/upgrade/collec.rs @@ -1,22 +1,20 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. +// This file is part of Substrate. + +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . use futures::prelude::*; use libp2p::core::upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo}; diff --git a/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs index 1b2b97253d1ae..6a5ceb5571f98 100644 --- a/client/network/src/protocol/generic_proto/upgrade/legacy.rs +++ b/client/network/src/protocol/generic_proto/upgrade/legacy.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,8 +19,8 @@ use crate::config::ProtocolId; use bytes::BytesMut; use futures::prelude::*; -use futures_codec::Framed; -use libp2p::core::{Endpoint, UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName}; +use asynchronous_codec::Framed; +use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName}; use parking_lot::RwLock; use std::{collections::VecDeque, io, pin::Pin, sync::Arc, vec::IntoIter as VecIntoIter}; use std::task::{Context, Poll}; @@ -85,34 +85,18 @@ impl Clone for RegisteredProtocol { pub struct RegisteredProtocolSubstream { /// If true, we are in the process of closing the sink. is_closing: bool, - /// Whether the local node opened this substream (dialer), or we received this substream from - /// the remote (listener). - endpoint: Endpoint, /// Buffer of packets to send. send_queue: VecDeque, /// If true, we should call `poll_complete` on the inner sink. requires_poll_flush: bool, /// The underlying substream. inner: stream::Fuse>>, - /// Version of the protocol that was negotiated. - protocol_version: u8, /// If true, we have sent a "remote is clogged" event recently and shouldn't send another one /// unless the buffer empties then fills itself again. clogged_fuse: bool, } impl RegisteredProtocolSubstream { - /// Returns the version of the protocol that was negotiated. - pub fn protocol_version(&self) -> u8 { - self.protocol_version - } - - /// Returns whether the local node opened this substream (dialer), or we received this - /// substream from the remote (listener). - pub fn endpoint(&self) -> Endpoint { - self.endpoint - } - /// Starts a graceful shutdown process on this substream. /// /// Note that "graceful" means that we sent a closing message. We don't wait for any @@ -246,7 +230,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, fn upgrade_inbound( self, socket: TSubstream, - info: Self::Info, + _: Self::Info, ) -> Self::Future { Box::pin(async move { let mut framed = { @@ -262,11 +246,9 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, Ok((RegisteredProtocolSubstream { is_closing: false, - endpoint: Endpoint::Listener, send_queue: VecDeque::new(), requires_poll_flush: false, inner: framed.fuse(), - protocol_version: info.version, clogged_fuse: false, }, received_handshake.to_vec())) }) @@ -283,7 +265,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, fn upgrade_outbound( self, socket: TSubstream, - info: Self::Info, + _: Self::Info, ) -> Self::Future { Box::pin(async move { let mut framed = { @@ -301,11 +283,9 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, Ok((RegisteredProtocolSubstream { is_closing: false, - endpoint: Endpoint::Dialer, send_queue: VecDeque::new(), requires_poll_flush: false, inner: framed.fuse(), - protocol_version: info.version, clogged_fuse: false, }, received_handshake.to_vec())) }) diff --git a/client/network/src/protocol/generic_proto/upgrade/notifications.rs b/client/network/src/protocol/generic_proto/upgrade/notifications.rs index 64b4b980da002..eba96441bcfde 100644 --- a/client/network/src/protocol/generic_proto/upgrade/notifications.rs +++ b/client/network/src/protocol/generic_proto/upgrade/notifications.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
-// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . /// Notifications protocol. /// @@ -36,10 +38,10 @@ use bytes::BytesMut; use futures::prelude::*; -use futures_codec::Framed; +use asynchronous_codec::Framed; use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade}; use log::error; -use std::{borrow::Cow, convert::Infallible, io, iter, mem, pin::Pin, task::{Context, Poll}}; +use std::{borrow::Cow, convert::{Infallible, TryFrom as _}, io, iter, mem, pin::Pin, task::{Context, Poll}}; use unsigned_varint::codec::UviBytes; /// Maximum allowed size of the two handshake messages, in bytes. @@ -51,6 +53,8 @@ const MAX_HANDSHAKE_SIZE: usize = 1024; pub struct NotificationsIn { /// Protocol name to use when negotiating the substream. protocol_name: Cow<'static, str>, + /// Maximum allowed size for a single notification. + max_notification_size: u64, } /// Upgrade that opens a substream, waits for the remote to accept by sending back a status @@ -61,6 +65,8 @@ pub struct NotificationsOut { protocol_name: Cow<'static, str>, /// Message to send when we start the handshake. initial_message: Vec, + /// Maximum allowed size for a single notification. + max_notification_size: u64, } /// A substream for incoming notification messages. @@ -100,16 +106,12 @@ pub struct NotificationsOutSubstream { impl NotificationsIn { /// Builds a new potential upgrade. - pub fn new(protocol_name: impl Into>) -> Self { + pub fn new(protocol_name: impl Into>, max_notification_size: u64) -> Self { NotificationsIn { protocol_name: protocol_name.into(), + max_notification_size, } } - - /// Returns the name of the protocol that we accept. - pub fn protocol_name(&self) -> &Cow<'static, str> { - &self.protocol_name - } } impl UpgradeInfo for NotificationsIn { @@ -151,8 +153,11 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, socket.read_exact(&mut initial_message).await?; } + let mut codec = UviBytes::default(); + codec.set_max_len(usize::try_from(self.max_notification_size).unwrap_or(usize::max_value())); + let substream = NotificationsInSubstream { - socket: Framed::new(socket, UviBytes::default()), + socket: Framed::new(socket, codec), handshake: NotificationsInSubstreamHandshake::NotSent, }; @@ -290,7 +295,11 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, impl NotificationsOut { /// Builds a new potential upgrade. 
- pub fn new(protocol_name: impl Into>, initial_message: impl Into>) -> Self { + pub fn new( + protocol_name: impl Into>, + initial_message: impl Into>, + max_notification_size: u64, + ) -> Self { let initial_message = initial_message.into(); if initial_message.len() > MAX_HANDSHAKE_SIZE { error!(target: "sub-libp2p", "Outbound networking handshake is above allowed protocol limit"); @@ -299,6 +308,7 @@ impl NotificationsOut { NotificationsOut { protocol_name: protocol_name.into(), initial_message, + max_notification_size, } } } @@ -345,8 +355,11 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, socket.read_exact(&mut handshake).await?; } + let mut codec = UviBytes::default(); + codec.set_max_len(usize::try_from(self.max_notification_size).unwrap_or(usize::max_value())); + Ok((handshake, NotificationsOutSubstream { - socket: Framed::new(socket, UviBytes::default()), + socket: Framed::new(socket, codec), })) }) } @@ -439,7 +452,7 @@ mod tests { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); let (handshake, mut substream) = upgrade::apply_outbound( socket, - NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), + NotificationsOut::new(PROTO_NAME, &b"initial message"[..], 1024 * 1024), upgrade::Version::V1 ).await.unwrap(); @@ -454,7 +467,7 @@ mod tests { let (socket, _) = listener.accept().await.unwrap(); let (initial_message, mut substream) = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME) + NotificationsIn::new(PROTO_NAME, 1024 * 1024) ).await.unwrap(); assert_eq!(initial_message, b"initial message"); @@ -478,7 +491,7 @@ mod tests { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); let (handshake, mut substream) = upgrade::apply_outbound( socket, - NotificationsOut::new(PROTO_NAME, vec![]), + NotificationsOut::new(PROTO_NAME, vec![], 1024 * 1024), upgrade::Version::V1 ).await.unwrap(); @@ -493,7 +506,7 @@ mod tests { let (socket, _) = listener.accept().await.unwrap(); let (initial_message, mut substream) = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME) + NotificationsIn::new(PROTO_NAME, 1024 * 1024) ).await.unwrap(); assert!(initial_message.is_empty()); @@ -515,7 +528,7 @@ mod tests { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); let outcome = upgrade::apply_outbound( socket, - NotificationsOut::new(PROTO_NAME, &b"hello"[..]), + NotificationsOut::new(PROTO_NAME, &b"hello"[..], 1024 * 1024), upgrade::Version::V1 ).await; @@ -532,7 +545,7 @@ mod tests { let (socket, _) = listener.accept().await.unwrap(); let (initial_msg, substream) = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME) + NotificationsIn::new(PROTO_NAME, 1024 * 1024) ).await.unwrap(); assert_eq!(initial_msg, b"hello"); @@ -554,7 +567,7 @@ mod tests { let ret = upgrade::apply_outbound( socket, // We check that an initial message that is too large gets refused. 
- NotificationsOut::new(PROTO_NAME, (0..32768).map(|_| 0).collect::>()), + NotificationsOut::new(PROTO_NAME, (0..32768).map(|_| 0).collect::>(), 1024 * 1024), upgrade::Version::V1 ).await; assert!(ret.is_err()); @@ -567,7 +580,7 @@ mod tests { let (socket, _) = listener.accept().await.unwrap(); let ret = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME) + NotificationsIn::new(PROTO_NAME, 1024 * 1024) ).await; assert!(ret.is_err()); }); @@ -584,7 +597,7 @@ mod tests { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); let ret = upgrade::apply_outbound( socket, - NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), + NotificationsOut::new(PROTO_NAME, &b"initial message"[..], 1024 * 1024), upgrade::Version::V1 ).await; assert!(ret.is_err()); @@ -597,7 +610,7 @@ mod tests { let (socket, _) = listener.accept().await.unwrap(); let (initial_message, mut substream) = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME) + NotificationsIn::new(PROTO_NAME, 1024 * 1024) ).await.unwrap(); assert_eq!(initial_message, b"initial message"); diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 1cd78c0ed1dda..3161f91e533c0 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,7 +25,6 @@ pub use self::generic::{ BlockAnnounce, RemoteCallRequest, RemoteReadRequest, RemoteHeaderRequest, RemoteHeaderResponse, RemoteChangesRequest, RemoteChangesResponse, - FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, }; use sc_client_api::StorageProof; @@ -96,7 +95,7 @@ impl BlockAttributes { } impl Encode for BlockAttributes { - fn encode_to(&self, dest: &mut T) { + fn encode_to(&self, dest: &mut T) { dest.push_byte(self.bits()) } } @@ -199,7 +198,7 @@ pub mod generic { } impl codec::Encode for Roles { - fn encode_to(&self, dest: &mut T) { + fn encode_to(&self, dest: &mut T) { dest.push_byte(self.bits()) } } @@ -216,7 +215,7 @@ pub mod generic { #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct ConsensusMessage { /// Identifies consensus engine. - pub engine_id: ConsensusEngineId, + pub protocol: ConsensusEngineId, /// Message payload. pub data: Vec, } @@ -280,40 +279,13 @@ pub mod generic { RemoteChangesResponse(RemoteChangesResponse), /// Remote child storage read request. RemoteReadChildRequest(RemoteReadChildRequest), - /// Finality proof request. - FinalityProofRequest(FinalityProofRequest), - /// Finality proof response. - FinalityProofResponse(FinalityProofResponse), /// Batch of consensus protocol messages. + // NOTE: index is incremented by 2 due to finality proof related + // messages that were removed. + #[codec(index = 17)] ConsensusBatch(Vec), } - impl Message { - /// Message id useful for logging. 
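// The notification upgrades above now carry an explicit `max_notification_size`,
// which is handed to the `UviBytes` length-prefix codec via `set_max_len` so
// that oversized frames are rejected at the framing layer. A small sketch of
// the size conversion used in the patch; `clamp_to_usize` is a hypothetical
// helper for this sketch only, and the commented-out codec lines mirror
// `upgrade_inbound`/`upgrade_outbound` above.
use std::convert::TryFrom;

fn clamp_to_usize(max_notification_size: u64) -> usize {
    // Same conversion as in the patch: on targets where `usize` is narrower
    // than `u64`, fall back to `usize::max_value()`.
    usize::try_from(max_notification_size).unwrap_or(usize::max_value())
}

fn main() {
    assert_eq!(clamp_to_usize(1024 * 1024), 1024 * 1024);
    // let mut codec = unsigned_varint::codec::UviBytes::default();
    // codec.set_max_len(clamp_to_usize(1024 * 1024));
}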
- pub fn id(&self) -> &'static str { - match self { - Message::Status(_) => "Status", - Message::BlockRequest(_) => "BlockRequest", - Message::BlockResponse(_) => "BlockResponse", - Message::BlockAnnounce(_) => "BlockAnnounce", - Message::Transactions(_) => "Transactions", - Message::Consensus(_) => "Consensus", - Message::RemoteCallRequest(_) => "RemoteCallRequest", - Message::RemoteCallResponse(_) => "RemoteCallResponse", - Message::RemoteReadRequest(_) => "RemoteReadRequest", - Message::RemoteReadResponse(_) => "RemoteReadResponse", - Message::RemoteHeaderRequest(_) => "RemoteHeaderRequest", - Message::RemoteHeaderResponse(_) => "RemoteHeaderResponse", - Message::RemoteChangesRequest(_) => "RemoteChangesRequest", - Message::RemoteChangesResponse(_) => "RemoteChangesResponse", - Message::RemoteReadChildRequest(_) => "RemoteReadChildRequest", - Message::FinalityProofRequest(_) => "FinalityProofRequest", - Message::FinalityProofResponse(_) => "FinalityProofResponse", - Message::ConsensusBatch(_) => "ConsensusBatch", - } - } - } - /// Status sent on connection. // TODO https://github.com/paritytech/substrate/issues/4674: replace the `Status` // struct with this one, after waiting a few releases beyond `NetworkSpecialization`'s @@ -430,7 +402,7 @@ pub mod generic { // This assumes that the packet contains nothing but the announcement message. // TODO: Get rid of it once protocol v4 is common. impl Encode for BlockAnnounce { - fn encode_to(&self, dest: &mut T) { + fn encode_to(&self, dest: &mut T) { self.header.encode_to(dest); if let Some(state) = &self.state { state.encode_to(dest); @@ -546,26 +518,4 @@ pub mod generic { /// Missing changes tries roots proof. pub roots_proof: StorageProof, } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Finality proof request. - pub struct FinalityProofRequest { - /// Unique request id. - pub id: RequestId, - /// Hash of the block to request proof for. - pub block: H, - /// Additional data blob (that both requester and provider understood) required for proving finality. - pub request: Vec, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Finality proof response. - pub struct FinalityProofResponse { - /// Id of a request this response was made for. - pub id: RequestId, - /// Hash of the block (the same as in the FinalityProofRequest). - pub block: H, - /// Finality proof (if available). - pub proof: Option>, - } } diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 03714b05ace0d..35f840152217f 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -1,18 +1,20 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// -// Substrate is free software: you can redistribute it and/or modify + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// -// Substrate is distributed in the hope that it will be useful, + +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
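// The `#[codec(index = 17)]` attribute added to `ConsensusBatch` above keeps
// its wire index stable even though the two finality-proof variants that used
// to precede it were removed. A minimal sketch of the mechanism, assuming only
// the `parity-scale-codec` derive; the `Old`/`New` enums are hypothetical and
// exist only for this illustration.
use parity_scale_codec::{Decode, Encode};

#[allow(dead_code)]
#[derive(Encode, Decode, PartialEq, Debug)]
enum Old {
    A(u8),    // implicit index 0
    Removed1, // implicit index 1
    Removed2, // implicit index 2
    B(u8),    // implicit index 3
}

#[derive(Encode, Decode, PartialEq, Debug)]
enum New {
    A(u8), // implicit index 0
    #[codec(index = 3)]
    B(u8), // pinned to the index it had before the removals
}

fn main() {
    // The first byte of the encoding is the variant index, so `New::B`
    // still decodes from bytes produced by `Old::B`.
    let bytes = Old::B(7).encode();
    assert_eq!(New::decode(&mut &bytes[..]).unwrap(), New::B(7));
}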
See the // GNU General Public License for more details. -// + // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Contains the state of the chain synchronization process //! @@ -34,10 +36,8 @@ use sp_consensus::{BlockOrigin, BlockStatus, block_validation::{BlockAnnounceValidator, Validation}, import_queue::{IncomingBlock, BlockImportResult, BlockImportError} }; -use crate::{ - config::BoxFinalityProofRequestBuilder, - protocol::message::{self, generic::FinalityProofRequest, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, - FinalityProofResponse, Roles}, +use crate::protocol::message::{ + self, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, Roles, }; use either::Either; use extra_requests::ExtraRequests; @@ -46,12 +46,14 @@ use log::{debug, trace, warn, info, error}; use sp_runtime::{ Justification, generic::BlockId, - traits::{Block as BlockT, Header, NumberFor, Zero, One, CheckedSub, SaturatedConversion, Hash, HashFor} + traits::{ + Block as BlockT, Header as HeaderT, NumberFor, Zero, One, CheckedSub, SaturatedConversion, + Hash, HashFor, + }, }; use sp_arithmetic::traits::Saturating; use std::{ - fmt, ops::Range, collections::{HashMap, hash_map::Entry, HashSet, VecDeque}, - sync::Arc, pin::Pin, + fmt, ops::Range, collections::{HashMap, hash_map::Entry, HashSet}, sync::Arc, pin::Pin, }; use futures::{task::Poll, Future, stream::FuturesUnordered, FutureExt, StreamExt}; @@ -67,6 +69,10 @@ const MAX_IMPORTING_BLOCKS: usize = 2048; /// Maximum blocks to download ahead of any gap. const MAX_DOWNLOAD_AHEAD: u32 = 2048; +/// Maximum blocks to look backwards. The gap is the difference between the highest block and the +/// common block of a node. +const MAX_BLOCKS_TO_LOOK_BACKWARDS: u32 = MAX_DOWNLOAD_AHEAD / 2; + /// Maximum number of concurrent block announce validations. /// /// If the queue reaches the maximum, we drop any new block @@ -85,9 +91,6 @@ const MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER: usize = 4; /// so far behind. const MAJOR_SYNC_BLOCKS: u8 = 5; -/// Number of recently announced blocks to track for each peer. -const ANNOUNCE_HISTORY_SIZE: usize = 64; - mod rep { use sc_peerset::ReputationChange as Rep; /// Reputation change when a peer sent us a message that led to a @@ -110,17 +113,17 @@ mod rep { /// Peer did not provide us with advertised block data. pub const NO_BLOCK: Rep = Rep::new(-(1 << 29), "No requested block data"); - /// Reputation change for peers which send us a known block. - pub const KNOWN_BLOCK: Rep = Rep::new(-(1 << 29), "Duplicate block"); + /// Reputation change for peers which send us non-requested block data. + pub const NOT_REQUESTED: Rep = Rep::new(-(1 << 29), "Not requested block data"); /// Reputation change for peers which send us a block with bad justifications. pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); - /// Reputation change for peers which send us a block with bad finality proof. - pub const BAD_FINALITY_PROOF: Rep = Rep::new(-(1 << 16), "Bad finality proof"); - /// Reputation change when a peer sent us invlid ancestry result. pub const UNKNOWN_ANCESTOR:Rep = Rep::new(-(1 << 16), "DB Error"); + + /// Peer response data does not have requested bits. 
+ pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); } enum PendingRequests { @@ -185,18 +188,11 @@ pub struct ChainSync { /// What block attributes we require for this node, usually derived from /// what role we are, but could be customized required_block_attributes: message::BlockAttributes, - /// Any extra finality proof requests. - extra_finality_proofs: ExtraRequests, /// Any extra justification requests. extra_justifications: ExtraRequests, /// A set of hashes of blocks that are being downloaded or have been /// downloaded and are queued for import. queue_blocks: HashSet, - /// The best block number that was successfully imported into the chain. - /// This can not decrease. - best_imported_number: NumberFor, - /// Finality proof handler. - request_builder: Option>, /// Fork sync targets. fork_targets: HashMap>, /// A set of peers for which there might be potential block requests @@ -218,6 +214,8 @@ pub struct ChainSync { /// All the data we have about a Peer that we are trying to sync with #[derive(Debug, Clone)] pub struct PeerSync { + /// Peer id of this peer. + pub peer_id: PeerId, /// The common number is the block number that is a common point of /// ancestry for both our chains (as far as we know). pub common_number: NumberFor, @@ -228,9 +226,22 @@ pub struct PeerSync { /// The state of syncing this peer is in for us, generally categories /// into `Available` or "busy" with something as defined by `PeerSyncState`. pub state: PeerSyncState, - /// A queue of blocks that this peer has announced to us, should only - /// contain `ANNOUNCE_HISTORY_SIZE` entries. - pub recently_announced: VecDeque +} + +impl PeerSync { + /// Update the `common_number` iff `new_common > common_number`. + fn update_common_number(&mut self, new_common: NumberFor) { + if self.common_number < new_common { + trace!( + target: "sync", + "Updating peer {} common number from={} => to={}.", + self.peer_id, + self.common_number, + new_common, + ); + self.common_number = new_common; + } + } } /// The sync status of a peer we are trying to sync with @@ -270,17 +281,11 @@ pub enum PeerSyncState { DownloadingStale(B::Hash), /// Downloading justification for given block hash. DownloadingJustification(B::Hash), - /// Downloading finality proof for given block hash. - DownloadingFinalityProof(B::Hash) } impl PeerSyncState { pub fn is_available(&self) -> bool { - if let PeerSyncState::Available = self { - true - } else { - false - } + matches!(self, Self::Available) } } @@ -327,6 +332,18 @@ pub enum OnBlockData { Request(PeerId, BlockRequest) } +impl OnBlockData { + /// Returns `self` as request. + #[cfg(test)] + fn into_request(self) -> Option<(PeerId, BlockRequest)> { + if let Self::Request(peer, req) = self { + Some((peer, req)) + } else { + None + } + } +} + /// Result of [`ChainSync::poll_block_announce_validation`]. #[derive(Debug, Clone, PartialEq, Eq)] pub enum PollBlockAnnounceValidation { @@ -336,6 +353,8 @@ pub enum PollBlockAnnounceValidation { Failure { /// Who sent the processed block announcement? who: PeerId, + /// Should the peer be disconnected? + disconnect: bool, }, /// The announcement does not require further handling. Nothing { @@ -343,8 +362,8 @@ pub enum PollBlockAnnounceValidation { who: PeerId, /// Was this their new best block? is_best: bool, - /// The header of the announcement. - header: H, + /// The announcement. + announce: BlockAnnounce, }, /// The announcement header should be imported. 
ImportHeader { @@ -352,9 +371,11 @@ pub enum PollBlockAnnounceValidation { who: PeerId, /// Was this their new best block? is_best: bool, - /// The header of the announcement. - header: H, + /// The announcement. + announce: BlockAnnounce, }, + /// The block announcement should be skipped. + Skip, } /// Result of [`ChainSync::block_announce_validation`]. @@ -366,15 +387,8 @@ enum PreValidateBlockAnnounce { Failure { /// Who sent the processed block announcement? who: PeerId, - }, - /// The announcement does not require further handling. - Nothing { - /// Who sent the processed block announcement? - who: PeerId, - /// Was this their new best block? - is_best: bool, - /// The announcement. - announce: BlockAnnounce, + /// Should the peer be disconnected? + disconnect: bool, }, /// The pre-validation was sucessful and the announcement should be /// further processed. @@ -386,6 +400,19 @@ enum PreValidateBlockAnnounce { /// The announcement. announce: BlockAnnounce, }, + /// The announcement validation returned an error. + /// + /// An error means that *this* node failed to validate it because some internal error happened. + /// If the block announcement was invalid, [`Self::Failure`] is the correct variant to express + /// this. + Error { + who: PeerId, + }, + /// The block announcement should be skipped. + /// + /// This should *only* be returned when there wasn't a slot registered + /// for this block announcement validation. + Skip, } /// Result of [`ChainSync::on_block_justification`]. @@ -402,20 +429,6 @@ pub enum OnBlockJustification { } } -/// Result of [`ChainSync::on_block_finality_proof`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum OnBlockFinalityProof { - /// The proof needs no further handling. - Nothing, - /// The proof should be imported. - Import { - peer: PeerId, - hash: B::Hash, - number: NumberFor, - proof: Vec - } -} - /// Result of [`ChainSync::has_slot_for_block_announce_validation`]. enum HasSlotForBlockAnnounceValidation { /// Yes, there is a slot for the block announce validation. @@ -432,7 +445,6 @@ impl ChainSync { role: Roles, client: Arc>, info: &BlockchainInfo, - request_builder: Option>, block_announce_validator: Box + Send>, max_parallel_downloads: u32, ) -> Self { @@ -448,13 +460,10 @@ impl ChainSync { blocks: BlockCollection::new(), best_queued_hash: info.best_hash, best_queued_number: info.best_number, - best_imported_number: info.best_number, - extra_finality_proofs: ExtraRequests::new("finality proof"), extra_justifications: ExtraRequests::new("justification"), role, required_block_attributes, queue_blocks: Default::default(), - request_builder, fork_targets: Default::default(), pending_requests: Default::default(), block_announce_validator, @@ -537,61 +546,70 @@ impl ChainSync { self.best_queued_hash, self.best_queued_number ); - self.peers.insert(who, PeerSync { + self.peers.insert(who.clone(), PeerSync { + peer_id: who, common_number: self.best_queued_number, best_hash, best_number, state: PeerSyncState::Available, - recently_announced: Default::default() }); return Ok(None) } // If we are at genesis, just start downloading. 
- if self.best_queued_number.is_zero() { - debug!(target:"sync", "New peer with best hash {} ({}).", best_hash, best_number); - self.peers.insert(who.clone(), PeerSync { - common_number: Zero::zero(), + let (state, req) = if self.best_queued_number.is_zero() { + debug!( + target:"sync", + "New peer with best hash {} ({}).", best_hash, best_number, - state: PeerSyncState::Available, - recently_announced: Default::default(), - }); - self.pending_requests.add(&who); - return Ok(None) - } + ); - let common_best = std::cmp::min(self.best_queued_number, best_number); + (PeerSyncState::Available, None) + } else { + let common_best = std::cmp::min(self.best_queued_number, best_number); - debug!(target:"sync", - "New peer with unknown best hash {} ({}), searching for common ancestor.", - best_hash, - best_number - ); + debug!( + target:"sync", + "New peer with unknown best hash {} ({}), searching for common ancestor.", + best_hash, + best_number + ); + + ( + PeerSyncState::AncestorSearch { + current: common_best, + start: self.best_queued_number, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }, + Some(ancestry_request::(common_best)) + ) + }; self.pending_requests.add(&who); - self.peers.insert(who, PeerSync { + self.peers.insert(who.clone(), PeerSync { + peer_id: who, common_number: Zero::zero(), best_hash, best_number, - state: PeerSyncState::AncestorSearch { - current: common_best, - start: self.best_queued_number, - state: AncestorSearchState::ExponentialBackoff(One::one()), - }, - recently_announced: Default::default() + state, }); - Ok(Some(ancestry_request::(common_best))) + Ok(req) } Ok(BlockStatus::Queued) | Ok(BlockStatus::InChainWithState) | Ok(BlockStatus::InChainPruned) => { - debug!(target:"sync", "New peer with known best hash {} ({}).", best_hash, best_number); + debug!( + target: "sync", + "New peer with known best hash {} ({}).", + best_hash, + best_number, + ); self.peers.insert(who.clone(), PeerSync { + peer_id: who.clone(), common_number: best_number, best_hash, best_number, state: PeerSyncState::Available, - recently_announced: Default::default(), }); self.pending_requests.add(&who); Ok(None) @@ -613,14 +631,6 @@ impl ChainSync { }) } - /// Schedule a finality proof request for the given block. - pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - self.extra_finality_proofs.schedule((*hash, number), |base, block| { - is_descendent_of(&**client, base, block) - }) - } - /// Request syncing for the given block from given set of peers. // The implementation is similar to on_block_announce with unknown parent hash. pub fn set_sync_fork_request( @@ -700,30 +710,6 @@ impl ChainSync { }) } - /// Get an iterator over all scheduled finality proof requests. 
- pub fn finality_proof_requests(&mut self) -> impl Iterator)> + '_ { - let peers = &mut self.peers; - let request_builder = &mut self.request_builder; - let mut matcher = self.extra_finality_proofs.matcher(); - std::iter::from_fn(move || { - if let Some((peer, request)) = matcher.next(&peers) { - peers.get_mut(&peer) - .expect("`Matcher::next` guarantees the `PeerId` comes from the given peers; qed") - .state = PeerSyncState::DownloadingFinalityProof(request.0); - let req = message::generic::FinalityProofRequest { - id: 0, - block: request.0, - request: request_builder.as_mut() - .map(|builder| builder.build_request_data(&request.0)) - .unwrap_or_default() - }; - Some((peer, req)) - } else { - None - } - }) - } - /// Get an iterator over all block requests of all peers. pub fn block_requests(&mut self) -> impl Iterator)> + '_ { if self.pending_requests.is_empty() { @@ -748,7 +734,21 @@ impl ChainSync { return None } - if let Some((range, req)) = peer_block_request( + // If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from the + // common number, the peer best number is higher than our best queued and the common + // number is smaller than the last finalized block number, we should do an ancestor + // search to find a better common block. + if best_queued.saturating_sub(peer.common_number) > MAX_BLOCKS_TO_LOOK_BACKWARDS.into() + && best_queued < peer.best_number && peer.common_number < last_finalized + { + let current = std::cmp::min(peer.best_number, best_queued); + peer.state = PeerSyncState::AncestorSearch { + current, + start: best_queued, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }; + Some((id, ancestry_request::(current))) + } else if let Some((range, req)) = peer_block_request( id, peer, blocks, @@ -811,13 +811,13 @@ impl ChainSync { blocks.reverse() } self.pending_requests.add(who); - if request.is_some() { + if let Some(request) = request { match &mut peer.state { PeerSyncState::DownloadingNew(start_block) => { self.blocks.clear_peer_download(who); let start_block = *start_block; peer.state = PeerSyncState::Available; - validate_blocks::(&blocks, who)?; + validate_blocks::(&blocks, who, Some(request))?; self.blocks.insert(start_block, blocks, who.clone()); self.blocks .drain(self.best_queued_number + One::one()) @@ -840,7 +840,7 @@ impl ChainSync { debug!(target: "sync", "Empty block response from {}", who); return Err(BadPeer(who.clone(), rep::NO_BLOCK)); } - validate_blocks::(&blocks, who)?; + validate_blocks::(&blocks, who, Some(request))?; blocks.into_iter().map(|b| { IncomingBlock { hash: b.hash, @@ -856,15 +856,29 @@ impl ChainSync { PeerSyncState::AncestorSearch { current, start, state } => { let matching_hash = match (blocks.get(0), self.client.hash(*current)) { (Some(block), Ok(maybe_our_block_hash)) => { - trace!(target: "sync", "Got ancestry block #{} ({}) from peer {}", current, block.hash, who); + trace!( + target: "sync", + "Got ancestry block #{} ({}) from peer {}", + current, + block.hash, + who, + ); maybe_our_block_hash.filter(|x| x == &block.hash) }, (None, _) => { - debug!(target: "sync", "Invalid response when searching for ancestor from {}", who); + debug!( + target: "sync", + "Invalid response when searching for ancestor from {}", + who, + ); return Err(BadPeer(who.clone(), rep::UNKNOWN_ANCESTOR)) }, (_, Err(e)) => { - info!("❌ Error answering legitimate blockchain query: {:?}", e); + info!( + target: "sync", + "❌ Error answering legitimate blockchain query: {:?}", + e, + ); return Err(BadPeer(who.clone(), 
rep::BLOCKCHAIN_READ_ERROR)) } }; @@ -883,17 +897,23 @@ impl ChainSync { trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); return Err(BadPeer(who.clone(), rep::GENESIS_MISMATCH)) } - if let Some((next_state, next_num)) = handle_ancestor_search_state(state, *current, matching_hash.is_some()) { + if let Some((next_state, next_num)) = + handle_ancestor_search_state(state, *current, matching_hash.is_some()) + { peer.state = PeerSyncState::AncestorSearch { current: next_num, start: *start, state: next_state, }; - return Ok(OnBlockData::Request(who.clone(), ancestry_request::(next_num))) + return Ok( + OnBlockData::Request(who.clone(), ancestry_request::(next_num)) + ) } else { // Ancestry search is complete. Check if peer is on a stale fork unknown to us and // add it to sync targets if necessary. - trace!(target: "sync", "Ancestry search complete. Ours={} ({}), Theirs={} ({}), Common={:?} ({})", + trace!( + target: "sync", + "Ancestry search complete. Ours={} ({}), Theirs={} ({}), Common={:?} ({})", self.best_queued_hash, self.best_queued_number, peer.best_hash, @@ -904,7 +924,12 @@ impl ChainSync { if peer.common_number < peer.best_number && peer.best_number < self.best_queued_number { - trace!(target: "sync", "Added fork target {} for {}" , peer.best_hash, who); + trace!( + target: "sync", + "Added fork target {} for {}", + peer.best_hash, + who, + ); self.fork_targets .entry(peer.best_hash.clone()) .or_insert_with(|| ForkTarget { @@ -920,12 +945,11 @@ impl ChainSync { } | PeerSyncState::Available - | PeerSyncState::DownloadingJustification(..) - | PeerSyncState::DownloadingFinalityProof(..) => Vec::new() + | PeerSyncState::DownloadingJustification(..) => Vec::new() } } else { // When request.is_none() this is a block announcement. Just accept blocks. - validate_blocks::(&blocks, who)?; + validate_blocks::(&blocks, who, None)?; blocks.into_iter().map(|b| { IncomingBlock { hash: b.hash, @@ -939,40 +963,30 @@ impl ChainSync { }).collect() } } else { - Vec::new() + // We don't know of this peer, so we also did not request anything from it. + return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)); }; - // When doing initial sync we don't request blocks in parallel. - // So the only way this can happen is when peers lie about the - // common block. - let is_recent = new_blocks.first() - .map(|block| { - self.peers.iter().any(|(_, peer)| peer.recently_announced.contains(&block.hash)) - }) - .unwrap_or(false); - - if !is_recent && new_blocks.last().map_or(false, |b| self.is_known(&b.hash)) { - // When doing initial sync we don't request blocks in parallel. - // So the only way this can happen is when peers lie about the - // common block. 
- debug!(target: "sync", "Ignoring known blocks from {}", who); - return Err(BadPeer(who.clone(), rep::KNOWN_BLOCK)); - } let orig_len = new_blocks.len(); new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); if new_blocks.len() != orig_len { debug!(target: "sync", "Ignoring {} blocks that are already queued", orig_len - new_blocks.len()); } - let origin = - if is_recent { - BlockOrigin::NetworkBroadcast - } else { - BlockOrigin::NetworkInitialSync - }; + let origin = if self.status().state != SyncState::Downloading { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; if let Some((h, n)) = new_blocks.last().and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) { - trace!(target:"sync", "Accepted {} blocks ({:?}) with origin {:?}", new_blocks.len(), h, origin); + trace!( + target:"sync", + "Accepted {} blocks ({:?}) with origin {:?}", + new_blocks.len(), + h, + origin, + ); self.on_block_queued(h, n) } @@ -1033,41 +1047,6 @@ impl ChainSync { Ok(OnBlockJustification::Nothing) } - /// Handle new finality proof data. - pub fn on_block_finality_proof - (&mut self, who: PeerId, resp: FinalityProofResponse) -> Result, BadPeer> - { - let peer = - if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!(target: "sync", "💔 Called on_block_finality_proof_data with a bad peer ID"); - return Ok(OnBlockFinalityProof::Nothing) - }; - - self.pending_requests.add(&who); - if let PeerSyncState::DownloadingFinalityProof(hash) = peer.state { - peer.state = PeerSyncState::Available; - - // We only request one finality proof at a time. - if hash != resp.block { - info!( - target: "sync", - "💔 Invalid block finality proof provided: requested: {:?} got: {:?}", - hash, - resp.block - ); - return Err(BadPeer(who, rep::BAD_FINALITY_PROOF)); - } - - if let Some((peer, hash, number, p)) = self.extra_finality_proofs.on_response(who, resp.proof) { - return Ok(OnBlockFinalityProof::Import { peer, hash, number, proof: p }) - } - } - - Ok(OnBlockFinalityProof::Nothing) - } - /// A batch of blocks have been processed, with or without errors. 
/// /// Call this when a batch of blocks have been processed by the import @@ -1098,7 +1077,11 @@ impl ChainSync { } match result { - Ok(BlockImportResult::ImportedKnown(_number)) => {} + Ok(BlockImportResult::ImportedKnown(number, who)) => { + if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { + peer.update_common_number(number); + } + } Ok(BlockImportResult::ImportedUnknown(number, aux, who)) => { if aux.clear_justification_requests { trace!( @@ -1111,43 +1094,57 @@ impl ChainSync { } if aux.needs_justification { - trace!(target: "sync", "Block imported but requires justification {}: {:?}", number, hash); + trace!( + target: "sync", + "Block imported but requires justification {}: {:?}", + number, + hash, + ); self.request_justification(&hash, number); } if aux.bad_justification { - if let Some(peer) = who { + if let Some(ref peer) = who { info!("💔 Sent block with bad justification to import"); - output.push(Err(BadPeer(peer, rep::BAD_JUSTIFICATION))); + output.push(Err(BadPeer(peer.clone(), rep::BAD_JUSTIFICATION))); } } - if aux.needs_finality_proof { - trace!(target: "sync", "Block imported but requires finality proof {}: {:?}", number, hash); - self.request_finality_proof(&hash, number); - } - - if number > self.best_imported_number { - self.best_imported_number = number; + if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { + peer.update_common_number(number); } }, Err(BlockImportError::IncompleteHeader(who)) => { if let Some(peer) = who { - warn!("💔 Peer sent block with incomplete header to import"); + warn!( + target: "sync", + "💔 Peer sent block with incomplete header to import", + ); output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); output.extend(self.restart()); } }, Err(BlockImportError::VerificationFailed(who, e)) => { if let Some(peer) = who { - warn!("💔 Verification failed for block {:?} received from peer: {}, {:?}", hash, peer, e); + warn!( + target: "sync", + "💔 Verification failed for block {:?} received from peer: {}, {:?}", + hash, + peer, + e, + ); output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); output.extend(self.restart()); } }, Err(BlockImportError::BadBlock(who)) => { if let Some(peer) = who { - info!("💔 Block {:?} received from peer {} has been blacklisted", hash, peer); + info!( + target: "sync", + "💔 Block {:?} received from peer {} has been blacklisted", + hash, + peer, + ); output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); } }, @@ -1178,29 +1175,19 @@ impl ChainSync { self.pending_requests.set_all(); } - pub fn on_finality_proof_import(&mut self, req: (B::Hash, NumberFor), res: Result<(B::Hash, NumberFor), ()>) { - self.extra_finality_proofs.try_finalize_root(req, res, true); - self.pending_requests.set_all(); - } - /// Notify about finalization of the given block. 
pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - let r = self.extra_finality_proofs.on_block_finalized(hash, number, |base, block| { - is_descendent_of(&**client, base, block) - }); - - if let Err(err) = r { - warn!(target: "sync", "💔 Error cleaning up pending extra finality proof data requests: {:?}", err) - } - let client = &self.client; let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { is_descendent_of(&**client, base, block) }); if let Err(err) = r { - warn!(target: "sync", "💔 Error cleaning up pending extra justification data requests: {:?}", err); + warn!( + target: "sync", + "💔 Error cleaning up pending extra justification data requests: {:?}", + err, + ); } } @@ -1247,6 +1234,11 @@ impl ChainSync { /// is capped. /// /// Returns [`HasSlotForBlockAnnounceValidation`] to inform about the result. + /// + /// # Note + /// + /// It is *required* to call [`Self::peer_block_announce_validation_finished`] when the + /// validation is finished to clear the slot. fn has_slot_for_block_announce_validation(&mut self, peer: &PeerId) -> HasSlotForBlockAnnounceValidation { if self.block_announce_validation.len() >= MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS { return HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached @@ -1297,7 +1289,7 @@ impl ChainSync { who, hash, ); - PreValidateBlockAnnounce::Nothing { is_best, who, announce } + PreValidateBlockAnnounce::Skip }.boxed()); return } @@ -1314,7 +1306,7 @@ impl ChainSync { hash, who, ); - PreValidateBlockAnnounce::Nothing { is_best, who, announce } + PreValidateBlockAnnounce::Skip }.boxed()); return } @@ -1327,7 +1319,7 @@ impl ChainSync { hash, who, ); - PreValidateBlockAnnounce::Nothing { is_best, who, announce } + PreValidateBlockAnnounce::Skip }.boxed()); return } @@ -1345,18 +1337,23 @@ impl ChainSync { announce, who, }, - Ok(Validation::Failure) => { + Ok(Validation::Failure { disconnect }) => { debug!( target: "sync", - "Block announcement validation of block {} from {} failed", + "Block announcement validation of block {:?} from {} failed", hash, who, ); - PreValidateBlockAnnounce::Failure { who } + PreValidateBlockAnnounce::Failure { who, disconnect } } Err(e) => { - error!(target: "sync", "💔 Block announcement validation errored: {}", e); - PreValidateBlockAnnounce::Nothing { is_best, who, announce } + error!( + target: "sync", + "💔 Block announcement validation of block {:?} errored: {}", + hash, + e, + ); + PreValidateBlockAnnounce::Error { who } } } }.boxed()); @@ -1376,14 +1373,27 @@ impl ChainSync { cx: &mut std::task::Context, ) -> Poll> { match self.block_announce_validation.poll_next_unpin(cx) { - Poll::Ready(Some(res)) => Poll::Ready(self.finish_block_announce_validation(res)), + Poll::Ready(Some(res)) => { + self.peer_block_announce_validation_finished(&res); + Poll::Ready(self.finish_block_announce_validation(res)) + }, _ => Poll::Pending, } } - /// Should be called when a block announce validation was finished, to update the stats - /// of the given peer. - fn peer_block_announce_validation_finished(&mut self, peer: &PeerId) { + /// Should be called when a block announce validation is finished, to update the slots + /// of the peer that send the block announce. + fn peer_block_announce_validation_finished( + &mut self, + res: &PreValidateBlockAnnounce, + ) { + let peer = match res { + PreValidateBlockAnnounce::Failure { who, .. } | + PreValidateBlockAnnounce::Process { who, .. 
} | + PreValidateBlockAnnounce::Error { who } => who, + PreValidateBlockAnnounce::Skip => return, + }; + match self.block_announce_validation_per_peer_stats.entry(peer.clone()) { Entry::Vacant(_) => { error!( @@ -1393,7 +1403,8 @@ impl ChainSync { ); }, Entry::Occupied(mut entry) => { - if entry.get_mut().saturating_sub(1) == 0 { + *entry.get_mut() = entry.get().saturating_sub(1); + if *entry.get() == 0 { entry.remove(); } } @@ -1405,25 +1416,26 @@ impl ChainSync { &mut self, pre_validation_result: PreValidateBlockAnnounce, ) -> PollBlockAnnounceValidation { + trace!( + target: "sync", + "Finished block announce validation: {:?}", + pre_validation_result, + ); + let (announce, is_best, who) = match pre_validation_result { - PreValidateBlockAnnounce::Nothing { is_best, who, announce } => { - self.peer_block_announce_validation_finished(&who); - return PollBlockAnnounceValidation::Nothing { is_best, who, header: announce.header } - }, - PreValidateBlockAnnounce::Failure { who } => { - self.peer_block_announce_validation_finished(&who); - return PollBlockAnnounceValidation::Failure { who } + PreValidateBlockAnnounce::Failure { who, disconnect } => { + return PollBlockAnnounceValidation::Failure { who, disconnect } }, PreValidateBlockAnnounce::Process { announce, is_new_best, who } => { - self.peer_block_announce_validation_finished(&who); (announce, is_new_best, who) }, + PreValidateBlockAnnounce::Error { .. } | PreValidateBlockAnnounce::Skip => + return PollBlockAnnounceValidation::Skip, }; - let header = announce.header; - let number = *header.number(); - let hash = header.hash(); - let parent_status = self.block_status(header.parent_hash()).unwrap_or(BlockStatus::Unknown); + let number = *announce.header.number(); + let hash = announce.header.hash(); + let parent_status = self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); let known_parent = parent_status != BlockStatus::Unknown; let ancient_parent = parent_status == BlockStatus::InChainPruned; @@ -1432,14 +1444,9 @@ impl ChainSync { peer } else { error!(target: "sync", "💔 Called on_block_announce with a bad peer ID"); - return PollBlockAnnounceValidation::Nothing { is_best, who, header } + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } }; - while peer.recently_announced.len() >= ANNOUNCE_HISTORY_SIZE { - peer.recently_announced.pop_front(); - } - peer.recently_announced.push_back(hash.clone()); - if is_best { // update their best block peer.best_number = number; @@ -1447,18 +1454,19 @@ impl ChainSync { } if let PeerSyncState::AncestorSearch {..} = peer.state { - return PollBlockAnnounceValidation::Nothing { is_best, who, header } + trace!(target: "sync", "Peer state is ancestor search."); + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } // If the announced block is the best they have and is not ahead of us, our common number // is either one further ahead or it's the one they just announced, if we know about it. 
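// The assignments below (and the matching hunk in `on_blocks_processed` above) route all
// common-number updates through `peer.update_common_number(..)`. The helper itself is not part
// of this hunk; a minimal sketch, assuming it only ever moves the number forward so that a late
// or stale update can never regress an already established common block:
//
//   fn update_common_number(&mut self, new_common: NumberFor<B>) {
//       if self.common_number < new_common {
//           self.common_number = new_common;
//       }
//   }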
if is_best { if known && self.best_queued_number >= number { - peer.common_number = number - } else if header.parent_hash() == &self.best_queued_hash + peer.update_common_number(number); + } else if announce.header.parent_hash() == &self.best_queued_hash || known_parent && self.best_queued_number >= number { - peer.common_number = number - One::one(); + peer.update_common_number(number - One::one()); } } self.pending_requests.add(&who); @@ -1469,36 +1477,52 @@ impl ChainSync { if let Some(target) = self.fork_targets.get_mut(&hash) { target.peers.insert(who.clone()); } - return PollBlockAnnounceValidation::Nothing { is_best, who, header } + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } if ancient_parent { - trace!(target: "sync", "Ignored ancient block announced from {}: {} {:?}", who, hash, header); - return PollBlockAnnounceValidation::Nothing { is_best, who, header } + trace!( + target: "sync", + "Ignored ancient block announced from {}: {} {:?}", + who, + hash, + announce.header, + ); + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } let requires_additional_data = !self.role.is_light() || !known_parent; if !requires_additional_data { - trace!(target: "sync", "Importing new header announced from {}: {} {:?}", who, hash, header); - return PollBlockAnnounceValidation::ImportHeader { is_best, header, who } + trace!( + target: "sync", + "Importing new header announced from {}: {} {:?}", + who, + hash, + announce.header, + ); + return PollBlockAnnounceValidation::ImportHeader { is_best, announce, who } } if number <= self.best_queued_number { trace!( target: "sync", - "Added sync target for block announced from {}: {} {:?}", who, hash, header + "Added sync target for block announced from {}: {} {:?}", + who, + hash, + announce.header, ); self.fork_targets .entry(hash.clone()) .or_insert_with(|| ForkTarget { number, - parent_hash: Some(*header.parent_hash()), + parent_hash: Some(*announce.header.parent_hash()), peers: Default::default(), }) .peers.insert(who.clone()); } - PollBlockAnnounceValidation::Nothing { is_best, who, header } + trace!(target: "sync", "Announce validation result is nothing"); + PollBlockAnnounceValidation::Nothing { is_best, who, announce } } /// Call when a peer has disconnected. @@ -1506,31 +1530,28 @@ impl ChainSync { self.blocks.clear_peer_download(who); self.peers.remove(who); self.extra_justifications.peer_disconnected(who); - self.extra_finality_proofs.peer_disconnected(who); self.pending_requests.set_all(); } /// Restart the sync process. This will reset all pending block requests and return an iterator /// of new block requests to make to peers. Peers that were downloading finality data (i.e. - /// their state was `DownloadingJustification` or `DownloadingFinalityProof`) are unaffected and - /// will stay in the same state. + /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. 
fn restart<'a>( &'a mut self, ) -> impl Iterator), BadPeer>> + 'a { self.blocks.clear(); let info = self.client.info(); self.best_queued_hash = info.best_hash; - self.best_queued_number = std::cmp::max(info.best_number, self.best_imported_number); + self.best_queued_number = info.best_number; self.pending_requests.set_all(); debug!(target:"sync", "Restarted with {} ({})", self.best_queued_number, self.best_queued_hash); let old_peers = std::mem::take(&mut self.peers); old_peers.into_iter().filter_map(move |(id, p)| { - // peers that were downloading justifications or finality proofs + // peers that were downloading justifications // should be kept in that state. match p.state { - PeerSyncState::DownloadingJustification(_) - | PeerSyncState::DownloadingFinalityProof(_) => { + PeerSyncState::DownloadingJustification(_) => { self.peers.insert(id, p); return None; } @@ -1570,7 +1591,6 @@ impl ChainSync { Metrics { queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), - finality_proofs: self.extra_finality_proofs.metrics(), justifications: self.extra_justifications.metrics(), _priv: () } @@ -1581,7 +1601,6 @@ impl ChainSync { pub(crate) struct Metrics { pub(crate) queued_blocks: u32, pub(crate) fork_targets: u32, - pub(crate) finality_proofs: extra_requests::Metrics, pub(crate) justifications: extra_requests::Metrics, _priv: () } @@ -1621,7 +1640,7 @@ pub enum AncestorSearchState { fn handle_ancestor_search_state( state: &AncestorSearchState, curr_block_num: NumberFor, - block_hash_match: bool + block_hash_match: bool, ) -> Option<(AncestorSearchState, NumberFor)> { let two = >::one() + >::one(); match state { @@ -1672,44 +1691,41 @@ fn peer_block_request( if best_num >= peer.best_number { // Will be downloaded as alternative fork instead. return None; - } - if peer.common_number < finalized { + } else if peer.common_number < finalized { trace!( target: "sync", "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", - id, finalized, peer.common_number, peer.best_number, best_num, + id, peer.common_number, finalized, peer.best_number, best_num, ); } - if let Some(range) = blocks.needed_blocks( + let range = blocks.needed_blocks( id.clone(), MAX_BLOCKS_TO_REQUEST, peer.best_number, peer.common_number, max_parallel_downloads, MAX_DOWNLOAD_AHEAD, - ) { - // The end is not part of the range. - let last = range.end.saturating_sub(One::one()); + )?; - let from = if peer.best_number == last { - message::FromBlock::Hash(peer.best_hash) - } else { - message::FromBlock::Number(last) - }; - - let request = message::generic::BlockRequest { - id: 0, - fields: attrs.clone(), - from, - to: None, - direction: message::Direction::Descending, - max: Some((range.end - range.start).saturated_into::()) - }; + // The end is not part of the range. + let last = range.end.saturating_sub(One::one()); - Some((range, request)) + let from = if peer.best_number == last { + message::FromBlock::Hash(peer.best_hash) } else { - None - } + message::FromBlock::Number(last) + }; + + let request = message::generic::BlockRequest { + id: 0, + fields: attrs.clone(), + from, + to: None, + direction: message::Direction::Descending, + max: Some((range.end - range.start).saturated_into::()) + }; + + Some((range, request)) } /// Get pending fork sync targets for a peer. 
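// To make the request construction above concrete: `needed_blocks` hands back a half-open
// range, so for an illustrative range of `100..228` the request starts at block 227 (the
// exclusive end minus one), walks downwards (`Direction::Descending`) and asks for at most
// `228 - 100 = 128` blocks; `FromBlock::Hash` is only used when the peer's best block happens
// to be that starting block. A tiny self-contained check of the arithmetic (example numbers
// only):
//
//   let range = 100u64..228u64;
//   let last = range.end.saturating_sub(1);      // 227, the end is not part of the range
//   let max = (range.end - range.start) as u32;  // 128
//   assert_eq!((last, max), (227, 128));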
@@ -1720,8 +1736,7 @@ fn fork_sync_request( finalized: NumberFor, attributes: &message::BlockAttributes, check_block: impl Fn(&B::Hash) -> BlockStatus, -) -> Option<(B::Hash, BlockRequest)> -{ +) -> Option<(B::Hash, BlockRequest)> { targets.retain(|hash, r| { if r.number <= finalized { trace!(target: "sync", "Removed expired fork sync request {:?} (#{})", hash, r.number); @@ -1774,7 +1789,75 @@ fn is_descendent_of(client: &T, base: &Block::Hash, block: &Block::Has Ok(ancestor.hash == *base) } -fn validate_blocks(blocks: &Vec>, who: &PeerId) -> Result<(), BadPeer> { +/// Validate that the given `blocks` are correct. +/// +/// It is expected that `blocks` are in asending order. +fn validate_blocks( + blocks: &Vec>, + who: &PeerId, + request: Option>, +) -> Result<(), BadPeer> { + if let Some(request) = request { + if Some(blocks.len() as _) > request.max { + debug!( + target: "sync", + "Received more blocks than requested from {}. Expected in maximum {:?}, got {}.", + who, + request.max, + blocks.len(), + ); + + return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)) + } + + let block_header = if request.direction == message::Direction::Descending { + blocks.last() + } else { + blocks.first() + }.and_then(|b| b.header.as_ref()); + + let expected_block = block_header.as_ref() + .map_or(false, |h| match request.from { + message::FromBlock::Hash(hash) => h.hash() == hash, + message::FromBlock::Number(n) => h.number() == &n, + }); + + if !expected_block { + debug!( + target: "sync", + "Received block that was not requested. Requested {:?}, got {:?}.", + request.from, + block_header, + ); + + return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)) + } + + if request.fields.contains(message::BlockAttributes::HEADER) + && blocks.iter().any(|b| b.header.is_none()) + { + trace!( + target: "sync", + "Missing requested header for a block in response from {}.", + who, + ); + + return Err(BadPeer(who.clone(), rep::BAD_RESPONSE)) + } + + if request.fields.contains(message::BlockAttributes::BODY) + && blocks.iter().any(|b| b.body.is_none()) + { + trace!( + target: "sync", + "Missing requested body for a block in response from {}.", + who, + ); + + return Err(BadPeer(who.clone(), rep::BAD_RESPONSE)) + } + } + for b in blocks { if let Some(header) = &b.header { let hash = header.hash(); @@ -1805,20 +1888,23 @@ fn validate_blocks(blocks: &Vec>, who: } } } + Ok(()) } #[cfg(test)] mod test { - use super::message::FromBlock; + use super::message::{FromBlock, BlockState, BlockData}; use super::*; use sc_block_builder::BlockBuilderProvider; use sp_blockchain::HeaderBackend; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use substrate_test_runtime_client::{ - runtime::{Block, Hash}, + runtime::{Block, Hash, Header}, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + BlockBuilderExt, TestClient, ClientExt, }; + use futures::{future::poll_fn, executor::block_on}; #[test] fn processes_empty_response_on_justification_request_for_unknown_block() { @@ -1835,7 +1921,6 @@ mod test { Roles::AUTHORITY, client.clone(), &info, - None, block_announce_validator, 1, ); @@ -1907,7 +1992,6 @@ mod test { Roles::AUTHORITY, client.clone(), &info, - None, Box::new(DefaultBlockAnnounceValidator), 1, ); @@ -1915,7 +1999,6 @@ mod test { let peer_id1 = PeerId::random(); let peer_id2 = PeerId::random(); let peer_id3 = PeerId::random(); - let peer_id4 = PeerId::random(); let mut new_blocks = |n| { for _ in 0..n { @@ -1928,7 +2011,6 @@ mod test { }; let (b1_hash, b1_number) = 
new_blocks(50); - let (b2_hash, b2_number) = new_blocks(10); // add 2 peers at blocks that we don't have locally sync.new_peer(peer_id1.clone(), Hash::random(), 42).unwrap(); @@ -1958,38 +2040,453 @@ mod test { PeerSyncState::DownloadingJustification(b1_hash), ); - // add another peer at a known later block - sync.new_peer(peer_id4.clone(), b2_hash, b2_number).unwrap(); - - // we request a finality proof for a block we have locally - sync.request_finality_proof(&b2_hash, b2_number); - - // the finality proof request should be scheduled to peer 4 - // which is at that block - assert!( - sync.finality_proof_requests().any(|(p, r)| { p == peer_id4 && r.block == b2_hash }) - ); - - assert_eq!( - sync.peers.get(&peer_id4).unwrap().state, - PeerSyncState::DownloadingFinalityProof(b2_hash), - ); - // we restart the sync state let block_requests = sync.restart(); // which should make us send out block requests to the first two peers assert!(block_requests.map(|r| r.unwrap()).all(|(p, _)| { p == peer_id1 || p == peer_id2 })); - // peer 3 and 4 should be unaffected as they were downloading finality data + // peer 3 should be unaffected it was downloading finality data assert_eq!( sync.peers.get(&peer_id3).unwrap().state, PeerSyncState::DownloadingJustification(b1_hash), ); + } - assert_eq!( - sync.peers.get(&peer_id4).unwrap().state, - PeerSyncState::DownloadingFinalityProof(b2_hash), + /// Send a block annoucnement for the given `header`. + fn send_block_announce( + header: Header, + peer_id: &PeerId, + sync: &mut ChainSync, + ) { + let block_annnounce = BlockAnnounce { + header: header.clone(), + state: Some(BlockState::Best), + data: Some(Vec::new()), + }; + + sync.push_block_announce_validation( + peer_id.clone(), + header.hash(), + block_annnounce, + true, + ); + + // Poll until we have procssed the block announcement + block_on(poll_fn(|cx| loop { + if sync.poll_block_announce_validation(cx).is_pending() { + break Poll::Ready(()) + } + })) + } + + /// Create a block response from the given `blocks`. + fn create_block_response(blocks: Vec) -> BlockResponse { + BlockResponse:: { + id: 0, + blocks: blocks.into_iter().map(|b| + BlockData:: { + hash: b.hash(), + header: Some(b.header().clone()), + body: Some(b.deconstruct().1), + receipt: None, + message_queue: None, + justification: None, + } + ).collect(), + } + } + + /// Get a block request from `sync` and check that is matches the expected request. + fn get_block_request( + sync: &mut ChainSync, + from: FromBlock, + max: u32, + peer: &PeerId, + ) -> BlockRequest { + let requests = sync.block_requests().collect::>(); + + log::trace!(target: "sync", "Requests: {:?}", requests); + + assert_eq!(1, requests.len()); + assert_eq!(peer, requests[0].0); + + let request = requests[0].1.clone(); + + assert_eq!(from, request.from); + assert_eq!(Some(max), request.max); + request + } + + /// Build and import a new best block. + fn build_block(client: &mut Arc, at: Option, fork: bool) -> Block { + let at = at.unwrap_or_else(|| client.info().best_hash); + + let mut block_builder = client.new_block_at( + &BlockId::Hash(at), + Default::default(), + false, + ).unwrap(); + + if fork { + block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); + } + + let block = block_builder.build().unwrap().block; + + client.import(BlockOrigin::Own, block.clone()).unwrap(); + block + } + + /// This test is a regression test as observed on a real network. + /// + /// The node is connected to multiple peers. 
Both of these peers have a best block (1) that + /// is below our best block (3). Now peer 2 announces a fork of block 3 that we will + /// request from peer 2. After importing the fork, peer 2 and then peer 1 will announce block 4. + /// But as peer 1 in our view is still at block 1, we will request block 2 (which we already have) + /// from it. In the meantime peer 2 sends us blocks 4 and 3 and we send another request for block + /// 2 to peer 2. Peer 1 answers with block 2 and then peer 2 does as well. This needs to succeed, as we + /// have requested block 2 from both peers. + #[test] + fn do_not_report_peer_on_block_response_for_block_request() { + sp_tracing::try_init_simple(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + let info = client.info(); + + let mut sync = ChainSync::new( + Roles::AUTHORITY, + client.clone(), + &info, + Box::new(DefaultBlockAnnounceValidator), + 5, + ); + + let peer_id1 = PeerId::random(); + let peer_id2 = PeerId::random(); + + let mut client2 = client.clone(); + let mut build_block_at = |at, import| { + let mut block_builder = client2.new_block_at(&BlockId::Hash(at), Default::default(), false) + .unwrap(); + // Make sure we generate a different block as a fork + block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); + + let block = block_builder.build().unwrap().block; + + if import { + client2.import(BlockOrigin::Own, block.clone()).unwrap(); + } + + block + }; + + let block1 = build_block(&mut client, None, false); + let block2 = build_block(&mut client, None, false); + let block3 = build_block(&mut client, None, false); + let block3_fork = build_block_at(block2.hash(), false); + + // Add two peers which are on block 1. + sync.new_peer(peer_id1.clone(), block1.hash(), 1).unwrap(); + sync.new_peer(peer_id2.clone(), block1.hash(), 1).unwrap(); + + // Tell sync that our best block is 3. + sync.update_chain_info(&block3.hash(), 3); + + // There should be no requests. + assert!(sync.block_requests().collect::<Vec<_>>().is_empty()); + + // Let peer2 announce a fork of block 3 + send_block_announce(block3_fork.header().clone(), &peer_id2, &mut sync); + + // Import and tell sync that we now have the fork. + client.import(BlockOrigin::Own, block3_fork.clone()).unwrap(); + sync.update_chain_info(&block3_fork.hash(), 3); + + let block4 = build_block_at(block3_fork.hash(), false); + + // Let peer2 announce block 4 and check that sync wants to get the block. + send_block_announce(block4.header().clone(), &peer_id2, &mut sync); + + let request = get_block_request(&mut sync, FromBlock::Hash(block4.hash()), 2, &peer_id2); + + // Peer1 announces the same block, but as the common block is still `1`, sync will request + // block 2 again. + send_block_announce(block4.header().clone(), &peer_id1, &mut sync); + + let request2 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id1); + + let response = create_block_response(vec![block4.clone(), block3_fork.clone()]); + let res = sync.on_block_data(&peer_id2, Some(request), response).unwrap(); + + // We should not yet import the blocks, because there is still an open request for fetching + // block `2` which blocks the import.
+ assert!(matches!(res, OnBlockData::Import(_, blocks) if blocks.is_empty())); + + let request3 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id2); + + let response = create_block_response(vec![block2.clone()]); + let res = sync.on_block_data(&peer_id1, Some(request2), response).unwrap(); + assert!( + matches!( + res, + OnBlockData::Import(_, blocks) + if blocks.iter().all(|b| [2, 3, 4].contains(b.header.as_ref().unwrap().number())) + ) + ); + + let response = create_block_response(vec![block2.clone()]); + let res = sync.on_block_data(&peer_id2, Some(request3), response).unwrap(); + // Nothing to import + assert!(matches!(res, OnBlockData::Import(_, blocks) if blocks.is_empty())); + } + + fn unwrap_from_block_number(from: FromBlock) -> u64 { + if let FromBlock::Number(from) = from { + from + } else { + panic!("Expected a number!"); + } + } + + /// A regression test for a behavior we have seen on a live network. + /// + /// The scenario is that the node is doing a full resync and is connected to some node that is + /// doing a major sync as well. This other node that is doing a major sync will finish before + /// our node and send a block announcement message, but we haven't seen any block announcement + /// from this node during its sync process, meaning our common number didn't change. It is now expected + /// that we start an ancestor search to find the common number. + #[test] + fn do_ancestor_search_when_common_block_to_best_queued_gap_is_too_big() { + sp_tracing::try_init_simple(); + + let blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + (0..MAX_DOWNLOAD_AHEAD * 2).map(|_| build_block(&mut client, None, false)).collect::<Vec<_>>() + }; + + let mut client = Arc::new(TestClientBuilder::new().build()); + let info = client.info(); + + let mut sync = ChainSync::new( + Roles::AUTHORITY, + client.clone(), + &info, + Box::new(DefaultBlockAnnounceValidator), + 5, + ); + + let peer_id1 = PeerId::random(); + let peer_id2 = PeerId::random(); + + let best_block = blocks.last().unwrap().clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number()).unwrap(); + sync.new_peer(peer_id2.clone(), info.best_hash, 0).unwrap(); + + let mut best_block_num = 0; + while best_block_num < MAX_DOWNLOAD_AHEAD { + let request = get_block_request( + &mut sync, + FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), + MAX_BLOCKS_TO_REQUEST as u32, + &peer_id1, + ); + + let from = unwrap_from_block_number(request.from.clone()); + + let mut resp_blocks = blocks[best_block_num as usize..from as usize].to_vec(); + resp_blocks.reverse(); + + let response = create_block_response(resp_blocks.clone()); + + let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + assert!( + matches!( + res, + OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST + ), + ); + + best_block_num += MAX_BLOCKS_TO_REQUEST as u32; + + resp_blocks.into_iter() + .rev() + .for_each(|b| client.import_as_final(BlockOrigin::Own, b).unwrap()); + } + + // Let peer2 announce that it finished syncing + send_block_announce(best_block.header().clone(), &peer_id2, &mut sync); + + let (peer1_req, peer2_req) = sync.block_requests().fold((None, None), |res, req| { + if req.0 == &peer_id1 { + (Some(req.1), res.1) + } else if req.0 == &peer_id2 { + (res.0, Some(req.1)) + } else { + panic!("Unexpected req: {:?}", req) + } + }); + + // We should now do an ancestor search to find the correct common block.
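// The assertions below pin down the shape of an ancestor-search probe: a request for exactly
// one block (`max == Some(1)`) at a height we already have, whose answer is compared with our
// own block at that height to decide whether the common number can move up or the search has
// to look further back (see `handle_ancestor_search_state`). A minimal sketch of that
// comparison, using the test module's `Hash` type (illustrative only):
//
//   fn ancestor_probe_matches(ours: Option<&Hash>, theirs: &Hash) -> bool {
//       // A match means the peer shares our chain at this height.
//       ours.map_or(false, |h| h == theirs)
//   }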
+ let peer2_req = peer2_req.unwrap(); + assert_eq!(Some(1), peer2_req.max); + assert_eq!(FromBlock::Number(best_block_num as u64), peer2_req.from); + + let response = create_block_response(vec![blocks[(best_block_num - 1) as usize].clone()]); + let res = sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap(); + assert!( + matches!( + res, + OnBlockData::Import(_, blocks) if blocks.is_empty() + ), + ); + + let peer1_from = unwrap_from_block_number(peer1_req.unwrap().from); + + // As we are on the same chain, we should directly continue with requesting blocks from + // peer 2 as well. + get_block_request( + &mut sync, + FromBlock::Number(peer1_from + MAX_BLOCKS_TO_REQUEST as u64), + MAX_BLOCKS_TO_REQUEST as u32, + &peer_id2, + ); + } + + /// A test that ensures that we can sync a huge fork. + /// + /// The following scenario: + /// A peer connects to us and we both have the common block 512. The last finalized block is 2048. + /// Our best block is 4096. The peer sends us a block announcement with 4097 from a fork. + /// + /// We will first do an ancestor search to find the common block. After that we start to sync + /// the fork and finish it ;) + #[test] + fn can_sync_huge_fork() { + sp_tracing::try_init_simple(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) + .map(|_| build_block(&mut client, None, false)) + .collect::<Vec<_>>(); + + let fork_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] + .into_iter() + .inspect(|b| client.import(BlockOrigin::Own, (*b).clone()).unwrap()) + .cloned() + .collect::<Vec<_>>(); + + fork_blocks.into_iter().chain( + (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) + .map(|_| build_block(&mut client, None, true)) + ).collect::<Vec<_>>() + }; + + let info = client.info(); + + let mut sync = ChainSync::new( + Roles::AUTHORITY, + client.clone(), + &info, + Box::new(DefaultBlockAnnounceValidator), + 5, + ); + + let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); + client.finalize_block(BlockId::Hash(finalized_block.hash()), Some(Vec::new())).unwrap(); + sync.update_chain_info(&info.best_hash, info.best_number); + + let peer_id1 = PeerId::random(); + + let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()).unwrap(); + + send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); + + let mut request = get_block_request( + &mut sync, + FromBlock::Number(info.best_number), + 1, + &peer_id1, + ); + + // Do the ancestor search + loop { + let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; + let response = create_block_response(vec![block.clone()]); + + let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + request = match on_block_data.into_request() { + Some(req) => req.1, + // We found the ancestor + None => break, + }; + + log::trace!(target: "sync", "Request: {:?}", request); + } + + // Now request and import the fork.
+ let mut best_block_num = finalized_block.header().number().clone() as u32; + while best_block_num < *fork_blocks.last().unwrap().header().number() as u32 - 1 { + let request = get_block_request( + &mut sync, + FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), + MAX_BLOCKS_TO_REQUEST as u32, + &peer_id1, + ); + + let from = unwrap_from_block_number(request.from.clone()); + + let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec(); + resp_blocks.reverse(); + + let response = create_block_response(resp_blocks.clone()); + + let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + assert!( + matches!( + res, + OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST + ), + ); + + best_block_num += MAX_BLOCKS_TO_REQUEST as u32; + + let _ = sync.on_blocks_processed( + MAX_BLOCKS_TO_REQUEST as usize, + MAX_BLOCKS_TO_REQUEST as usize, + resp_blocks.iter() + .rev() + .map(|b| + ( + Ok( + BlockImportResult::ImportedUnknown( + b.header().number().clone(), + Default::default(), + Some(peer_id1.clone()), + ) + ), + b.hash(), + ) + ) + .collect() + ); + + resp_blocks.into_iter() + .rev() + .for_each(|b| client.import(BlockOrigin::Own, b).unwrap()); + } + + // Request the tip + get_block_request( + &mut sync, + FromBlock::Hash(fork_blocks.last().unwrap().hash()), + 1, + &peer_id1, ); } } diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs index b64c9e053e97b..60492f24ed8c3 100644 --- a/client/network/src/protocol/sync/blocks.rs +++ b/client/network/src/protocol/sync/blocks.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs index df336c25339fd..3de79b3f48734 100644 --- a/client/network/src/protocol/sync/extra_requests.rs +++ b/client/network/src/protocol/sync/extra_requests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -345,8 +345,7 @@ impl<'a, B: BlockT> Matcher<'a, B> { mod tests { use crate::protocol::sync::PeerSync; use sp_blockchain::Error as ClientError; - use quickcheck::{Arbitrary, Gen, QuickCheck, StdThreadGen}; - use rand::Rng; + use quickcheck::{Arbitrary, Gen, QuickCheck}; use std::collections::{HashMap, HashSet}; use super::*; use sp_test_primitives::{Block, BlockNumber, Hash}; @@ -373,7 +372,7 @@ mod tests { } } - QuickCheck::with_gen(StdThreadGen::new(19)) + QuickCheck::new() .quickcheck(property as fn(ArbitraryPeers)) } @@ -425,7 +424,7 @@ mod tests { previously_active == requests.pending_requests.iter().cloned().collect::>() } - QuickCheck::with_gen(StdThreadGen::new(19)) + QuickCheck::new() .quickcheck(property as fn(ArbitraryPeers) -> bool) } @@ -457,7 +456,7 @@ mod tests { } } - QuickCheck::with_gen(StdThreadGen::new(19)) + QuickCheck::new() .quickcheck(property as fn(ArbitraryPeers)) } @@ -527,14 +526,13 @@ mod tests { struct ArbitraryPeerSyncState(PeerSyncState); impl Arbitrary for ArbitraryPeerSyncState { - fn arbitrary(g: &mut G) -> Self { - let s = match g.gen::() % 5 { + fn arbitrary(g: &mut Gen) -> Self { + let s = match u8::arbitrary(g) % 4 { 0 => PeerSyncState::Available, // TODO: 1 => PeerSyncState::AncestorSearch(g.gen(), AncestorSearchState), - 1 => PeerSyncState::DownloadingNew(g.gen::()), + 1 => PeerSyncState::DownloadingNew(BlockNumber::arbitrary(g)), 2 => PeerSyncState::DownloadingStale(Hash::random()), - 3 => PeerSyncState::DownloadingJustification(Hash::random()), - _ => PeerSyncState::DownloadingFinalityProof(Hash::random()) + _ => PeerSyncState::DownloadingJustification(Hash::random()), }; ArbitraryPeerSyncState(s) } @@ -544,13 +542,13 @@ mod tests { struct ArbitraryPeerSync(PeerSync); impl Arbitrary for ArbitraryPeerSync { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let ps = PeerSync { - common_number: g.gen(), + peer_id: PeerId::random(), + common_number: u64::arbitrary(g), best_hash: Hash::random(), - best_number: g.gen(), + best_number: u64::arbitrary(g), state: ArbitraryPeerSyncState::arbitrary(g).0, - recently_announced: Default::default() }; ArbitraryPeerSync(ps) } @@ -560,13 +558,13 @@ mod tests { struct ArbitraryPeers(HashMap>); impl Arbitrary for ArbitraryPeers { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let mut peers = HashMap::with_capacity(g.size()); for _ in 0 .. g.size() { - peers.insert(PeerId::random(), ArbitraryPeerSync::arbitrary(g).0); + let ps = ArbitraryPeerSync::arbitrary(g).0; + peers.insert(ps.peer_id.clone(), ps); } ArbitraryPeers(peers) } } - } diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 5e414248674f0..4d478ea7afd65 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
-// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. +// along with this program. If not, see <https://www.gnu.org/licenses/>. //! Collection of request-response protocols. //! @@ -52,6 +54,8 @@ use std::{ borrow::Cow, collections::{hash_map::Entry, HashMap}, convert::TryFrom as _, io, iter, pin::Pin, task::{Context, Poll}, time::Duration, }; +use wasm_timer::Instant; +use crate::ReputationChange; pub use libp2p::request_response::{InboundFailure, OutboundFailure, RequestId}; @@ -111,8 +115,27 @@ pub struct IncomingRequest { /// [`ProtocolConfig::max_request_size`]. pub payload: Vec<u8>, - /// Channel to send back the response to. - pub pending_response: oneshot::Sender<Vec<u8>>, + /// Channel to send back the response. + /// + /// There are two ways to indicate that handling the request failed: + /// + /// 1. Dropping `pending_response`, thus not changing the reputation of the peer. + /// + /// 2. Sending an `Err(())` via `pending_response`, optionally including reputation changes for + /// the given peer. + pub pending_response: oneshot::Sender<OutgoingResponse>, +} + +/// Response for an incoming request to be sent by a request protocol handler. +#[derive(Debug)] +pub struct OutgoingResponse { + /// The payload of the response. + /// + /// `Err(())` if none is available, e.g. due to an error while handling the request. + pub result: Result<Vec<u8>, ()>, + /// Reputation changes accrued while handling the request. To be applied to the reputation of + /// the peer sending the request. + pub reputation_changes: Vec<ReputationChange>, } /// Event generated by the [`RequestResponsesBehaviour`]. @@ -126,19 +149,70 @@ pub enum Event { peer: PeerId, /// Name of the protocol in question. protocol: Cow<'static, str>, - /// If `Ok`, contains the time elapsed between when we received the request and when we - /// sent back the response. If `Err`, the error that happened. + /// Whether handling the request was successful or unsuccessful. + /// + /// When successful, contains the time elapsed between when we received the request and when + /// we sent back the response. When unsuccessful, contains the failure reason. result: Result<Duration, ResponseFailure>, }, /// A request initiated using [`RequestResponsesBehaviour::send_request`] has succeeded or /// failed. + /// + /// This event is generated for statistics purposes. RequestFinished { - /// Request that has succeeded. - request_id: RequestId, - /// Response sent by the remote or reason for failure. - result: Result<Vec<u8>, RequestFailure>, + /// Peer that we sent a request to. + peer: PeerId, + /// Name of the protocol in question. + protocol: Cow<'static, str>, + /// Duration the request took. + duration: Duration, + /// Result of the request. + result: Result<(), RequestFailure> }, + + /// A request protocol handler issued reputation changes for the given peer. + ReputationChanges { + peer: PeerId, + changes: Vec<ReputationChange>, + } +} + +/// Combination of a protocol name and a request id. +/// +/// Uniquely identifies an inbound or outbound request among all handled protocols. Note however +/// that uniqueness is only guaranteed between two inbound and likewise between two outbound +/// requests.
There is no uniqueness guarantee in a set of both inbound and outbound +/// [`ProtocolRequestId`]s. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +struct ProtocolRequestId { + protocol: Cow<'static, str>, + request_id: RequestId, +} + +impl From<(Cow<'static, str>, RequestId)> for ProtocolRequestId { + fn from((protocol, request_id): (Cow<'static, str>, RequestId)) -> Self { + Self { protocol, request_id } + } +} + +/// When sending a request, what to do on a disconnected recipient. +pub enum IfDisconnected { + /// Try to connect to the peer. + TryConnect, + /// Just fail if the destination is not yet connected. + ImmediateError, +} + +/// Convenience functions for `IfDisconnected`. +impl IfDisconnected { + /// Shall we connect to a disconnected peer? + pub fn should_connect(self) -> bool { + match self { + Self::TryConnect => true, + Self::ImmediateError => false, + } + } } /// Implementation of `NetworkBehaviour` that provides support for request-response protocols. @@ -151,24 +225,29 @@ pub struct RequestResponsesBehaviour { (RequestResponse, Option>) >, + /// Pending requests, passed down to a [`RequestResponse`] behaviour, awaiting a reply. + pending_requests: HashMap< + ProtocolRequestId, + (Instant, oneshot::Sender, RequestFailure>>), + >, + /// Whenever an incoming request arrives, a `Future` is added to this list and will yield the - /// response to send back to the remote. + /// start time and the response to send back to the remote. pending_responses: stream::FuturesUnordered< - Pin + Send>> + Pin> + Send>> >, + + /// Whenever an incoming request arrives, the arrival [`Instant`] is recorded here. + pending_responses_arrival_time: HashMap, } /// Generated by the response builder and waiting to be processed. -enum RequestProcessingOutcome { - Response { - protocol: Cow<'static, str>, - inner_channel: ResponseChannel, ()>>, - response: Vec, - }, - Busy { - peer: PeerId, - protocol: Cow<'static, str>, - }, +struct RequestProcessingOutcome { + peer: PeerId, + request_id: RequestId, + protocol: Cow<'static, str>, + inner_channel: ResponseChannel, ()>>, + response: OutgoingResponse, } impl RequestResponsesBehaviour { @@ -201,25 +280,52 @@ impl RequestResponsesBehaviour { Ok(Self { protocols, - pending_responses: stream::FuturesUnordered::new(), + pending_requests: Default::default(), + pending_responses: Default::default(), + pending_responses_arrival_time: Default::default(), }) } /// Initiates sending a request. /// - /// An error is returned if we are not connected to the target peer or if the protocol doesn't - /// match one that has been registered. - pub fn send_request(&mut self, target: &PeerId, protocol: &str, request: Vec) - -> Result - { - if let Some((protocol, _)) = self.protocols.get_mut(protocol) { - if protocol.is_connected(target) { - Ok(protocol.send_request(target, request)) + /// If there is no established connection to the target peer, the behavior is determined by the choice of `connect`. + /// + /// An error is returned if the protocol doesn't match one that has been registered. 
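// The caller-side pattern for the new signature (mirrored by the tests at the bottom of this
// file): instead of getting a `RequestId` back, the caller hands in a oneshot sender and later
// awaits the receiver for the outcome. A minimal sketch; the target peer and protocol name are
// placeholders:
//
//   let (tx, rx) = oneshot::channel();
//   behaviour.send_request(
//       &target_peer,
//       "/example/req-resp/1",
//       b"payload".to_vec(),
//       tx,
//       IfDisconnected::TryConnect,
//   );
//   // ... later, from the task driving the swarm:
//   // let outcome: Result<Vec<u8>, RequestFailure> = rx.await.expect("sender kept alive");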
+ pub fn send_request( + &mut self, + target: &PeerId, + protocol_name: &str, + request: Vec, + pending_response: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, + ) { + if let Some((protocol, _)) = self.protocols.get_mut(protocol_name) { + if protocol.is_connected(target) || connect.should_connect() { + let request_id = protocol.send_request(target, request); + let prev_req_id = self.pending_requests.insert( + (protocol_name.to_string().into(), request_id).into(), + (Instant::now(), pending_response), + ); + debug_assert!(prev_req_id.is_none(), "Expect request id to be unique."); } else { - Err(SendRequestError::NotConnected) + if pending_response.send(Err(RequestFailure::NotConnected)).is_err() { + log::debug!( + target: "sub-libp2p", + "Not connected to peer {:?}. At the same time local \ + node is no longer interested in the result.", + target, + ); + }; } } else { - Err(SendRequestError::UnknownProtocol) + if pending_response.send(Err(RequestFailure::UnknownProtocol)).is_err() { + log::debug!( + target: "sub-libp2p", + "Unknown protocol {:?}. At the same time local \ + node is no longer interested in the result.", + protocol_name, + ); + }; } } } @@ -347,23 +453,46 @@ impl NetworkBehaviour for RequestResponsesBehaviour { > { 'poll_all: loop { // Poll to see if any response is ready to be sent back. - while let Poll::Ready(Some(result)) = self.pending_responses.poll_next_unpin(cx) { - match result { - RequestProcessingOutcome::Response { - protocol, inner_channel, response - } => { - if let Some((protocol, _)) = self.protocols.get_mut(&*protocol) { - protocol.send_response(inner_channel, Ok(response)); + while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) { + let RequestProcessingOutcome { + peer, + request_id, + protocol: protocol_name, + inner_channel, + response: OutgoingResponse { + result, + reputation_changes, + }, + } = match outcome { + Some(outcome) => outcome, + // The response builder was too busy or handling the request failed. This is + // later on reported as a `InboundFailure::Omission`. + None => continue, + }; + + if let Ok(payload) = result { + if let Some((protocol, _)) = self.protocols.get_mut(&*protocol_name) { + if let Err(_) = protocol.send_response(inner_channel, Ok(payload)) { + // Note: Failure is handled further below when receiving + // `InboundFailure` event from `RequestResponse` behaviour. + log::debug!( + target: "sub-libp2p", + "Failed to send response for {:?} on protocol {:?} due to a \ + timeout or due to the connection to the peer being closed. 
\ + Dropping response", + request_id, protocol_name, + ); } } - RequestProcessingOutcome::Busy { peer, protocol } => { - let out = Event::InboundRequest { + } + + if !reputation_changes.is_empty() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent( + Event::ReputationChanges{ peer, - protocol, - result: Err(ResponseFailure::Busy), - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); - } + changes: reputation_changes, + }, + )); } } @@ -381,7 +510,6 @@ impl NetworkBehaviour for RequestResponsesBehaviour { return Poll::Ready(NetworkBehaviourAction::DialAddress { address }) } NetworkBehaviourAction::DialPeer { peer_id, condition } => { - log::error!("The request-response isn't supposed to start dialing peers"); return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition, @@ -398,9 +526,9 @@ impl NetworkBehaviour for RequestResponsesBehaviour { event: ((*protocol).to_string(), event), }) } - NetworkBehaviourAction::ReportObservedAddr { address } => { + NetworkBehaviourAction::ReportObservedAddr { address, score } => { return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { - address, + address, score, }) } }; @@ -409,32 +537,41 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // Received a request from a remote. RequestResponseEvent::Message { peer, - message: RequestResponseMessage::Request { request, channel, .. }, + message: RequestResponseMessage::Request { request_id, request, channel, .. }, } => { + self.pending_responses_arrival_time.insert( + (protocol.clone(), request_id.clone()).into(), + Instant::now(), + ); + let (tx, rx) = oneshot::channel(); // Submit the request to the "response builder" passed by the user at // initialization. if let Some(resp_builder) = resp_builder { - // If the response builder is too busy, silently drop `tx`. - // This will be reported as a `Busy` error. + // If the response builder is too busy, silently drop `tx`. This + // will be reported by the corresponding `RequestResponse` through + // an `InboundFailure::Omission` event. let _ = resp_builder.try_send(IncomingRequest { peer: peer.clone(), payload: request, pending_response: tx, }); + } else { + debug_assert!(false, "Received message on outbound-only protocol."); } let protocol = protocol.clone(); self.pending_responses.push(Box::pin(async move { // The `tx` created above can be dropped if we are not capable of - // processing this request, which is reflected as a "Busy" error. + // processing this request, which is reflected as a + // `InboundFailure::Omission` event. if let Ok(response) = rx.await { - RequestProcessingOutcome::Response { - protocol, inner_channel: channel, response - } + Some(RequestProcessingOutcome { + peer, request_id, protocol, inner_channel: channel, response + }) } else { - RequestProcessingOutcome::Busy { peer, protocol } + None } })); @@ -445,35 +582,91 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // Received a response from a remote to one of our requests. RequestResponseEvent::Message { - message: - RequestResponseMessage::Response { - request_id, - response, - }, + peer, + message: RequestResponseMessage::Response { + request_id, + response, + }, .. 
} => { + let (started, delivered) = match self.pending_requests.remove( + &(protocol.clone(), request_id).into(), + ) { + Some((started, pending_response)) => { + let delivered = pending_response.send( + response.map_err(|()| RequestFailure::Refused), + ).map_err(|_| RequestFailure::Obsolete); + (started, delivered) + } + None => { + log::warn!( + target: "sub-libp2p", + "Received `RequestResponseEvent::Message` with unexpected request id {:?}", + request_id, + ); + debug_assert!(false); + continue; + } + }; + let out = Event::RequestFinished { - request_id, - result: response.map_err(|()| RequestFailure::Refused), + peer, + protocol: protocol.clone(), + duration: started.elapsed(), + result: delivered, }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); } // One of our requests has failed. RequestResponseEvent::OutboundFailure { + peer, request_id, error, .. } => { + let started = match self.pending_requests.remove(&(protocol.clone(), request_id).into()) { + Some((started, pending_response)) => { + if pending_response.send( + Err(RequestFailure::Network(error.clone())), + ).is_err() { + log::debug!( + target: "sub-libp2p", + "Request with id {:?} failed. At the same time local \ + node is no longer interested in the result.", + request_id, + ); + } + started + } + None => { + log::warn!( + target: "sub-libp2p", + "Received `RequestResponseEvent::Message` with unexpected request id {:?}", + request_id, + ); + debug_assert!(false); + continue; + } + }; + let out = Event::RequestFinished { - request_id, + peer, + protocol: protocol.clone(), + duration: started.elapsed(), result: Err(RequestFailure::Network(error)), }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); } - // Remote has tried to send a request but failed. - RequestResponseEvent::InboundFailure { peer, error, .. } => { + // An inbound request failed, either while reading the request or due to failing + // to send a response. + RequestResponseEvent::InboundFailure { request_id, peer, error, .. } => { + self.pending_responses_arrival_time.remove( + &(protocol.clone(), request_id).into(), + ); let out = Event::InboundRequest { peer, protocol: protocol.clone(), @@ -481,6 +674,29 @@ impl NetworkBehaviour for RequestResponsesBehaviour { }; return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); } + + // A response to an inbound request has been sent. + RequestResponseEvent::ResponseSent { request_id, peer } => { + let arrival_time = self.pending_responses_arrival_time.remove( + &(protocol.clone(), request_id).into(), + ) + .map(|t| t.elapsed()) + .expect( + "Time is added for each inbound request on arrival and only \ + removed on success (`ResponseSent`) or failure \ + (`InboundFailure`). One can not receive a success event for a \ + request that either never arrived, or that has previously \ + failed; qed.", + ); + + let out = Event::InboundRequest { + peer, + protocol: protocol.clone(), + result: Ok(arrival_time), + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + + } }; } } @@ -497,21 +713,18 @@ pub enum RegisterError { DuplicateProtocol(#[error(ignore)] Cow<'static, str>), } -/// Error when sending a request. +/// Error in a request. #[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum SendRequestError { +pub enum RequestFailure { /// We are not currently connected to the requested peer. NotConnected, /// Given protocol hasn't been registered. UnknownProtocol, -} - -/// Error in a request. 
-#[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum RequestFailure { /// Remote has closed the substream before answering, thereby signaling that it considers the /// request as valid, but refused to answer it. Refused, + /// The remote replied, but the local node is no longer interested in the response. + Obsolete, /// Problem on the network. #[display(fmt = "Problem on the network")] Network(#[error(ignore)] OutboundFailure), @@ -520,8 +733,6 @@ pub enum RequestFailure { /// Error when processing a request sent by a remote. #[derive(Debug, derive_more::Display, derive_more::Error)] pub enum ResponseFailure { - /// Internal response builder is too busy to process this request. - Busy, /// Problem on the network. #[display(fmt = "Problem on the network")] Network(#[error(ignore)] InboundFailure), @@ -655,7 +866,11 @@ impl RequestResponseCodec for GenericCodec { #[cfg(test)] mod tests { - use futures::{channel::mpsc, prelude::*}; + use super::*; + + use futures::channel::{mpsc, oneshot}; + use futures::executor::LocalPool; + use futures::task::Spawn; use libp2p::identity::Keypair; use libp2p::Multiaddr; use libp2p::core::upgrade; @@ -664,51 +879,57 @@ mod tests { use libp2p::swarm::{Swarm, SwarmEvent}; use std::{iter, time::Duration}; + fn build_swarm(list: impl Iterator) -> (Swarm, Multiaddr) { + let keypair = Keypair::generate_ed25519(); + + let noise_keys = noise::Keypair::::new() + .into_authentic(&keypair) + .unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(libp2p::yamux::YamuxConfig::default()) + .boxed(); + + let behaviour = RequestResponsesBehaviour::new(list).unwrap(); + + let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + + Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); + (swarm, listen_addr) + } + #[test] fn basic_request_response_works() { - let protocol_name = "/test/req-rep/1"; + let protocol_name = "/test/req-resp/1"; + let mut pool = LocalPool::new(); // Build swarms whose behaviour is `RequestResponsesBehaviour`. 
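// The swarms built below wire an inbound queue into the behaviour; the handler side of the new
// API consumes `IncomingRequest`s from that queue and answers through the bundled oneshot
// sender. A minimal sketch of such a handler loop (channel and payloads are illustrative):
//
//   async fn handle_requests(mut rx: mpsc::Receiver<IncomingRequest>) {
//       while let Some(request) = rx.next().await {
//           // Either answer with a payload (optionally attaching reputation changes), or drop
//           // `pending_response` / send `Err(())` to signal that handling failed.
//           let _ = request.pending_response.send(OutgoingResponse {
//               result: Ok(b"handled".to_vec()),
//               reputation_changes: Vec::new(),
//           });
//       }
//   }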
let mut swarms = (0..2) .map(|_| { - let keypair = Keypair::generate_ed25519(); - - let noise_keys = noise::Keypair::::new() - .into_authentic(&keypair) - .unwrap(); - - let transport = MemoryTransport - .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(libp2p::yamux::Config::default()) - .boxed(); - - let behaviour = { - let (tx, mut rx) = mpsc::channel(64); - - let b = super::RequestResponsesBehaviour::new(iter::once(super::ProtocolConfig { - name: From::from(protocol_name), - max_request_size: 1024, - max_response_size: 1024 * 1024, - request_timeout: Duration::from_secs(30), - inbound_queue: Some(tx), - })).unwrap(); - - async_std::task::spawn(async move { - while let Some(rq) = rx.next().await { - assert_eq!(rq.payload, b"this is a request"); - let _ = rq.pending_response.send(b"this is a response".to_vec()); - } - }); - - b + let (tx, mut rx) = mpsc::channel::(64); + + pool.spawner().spawn_obj(async move { + while let Some(rq) = rx.next().await { + assert_eq!(rq.payload, b"this is a request"); + let _ = rq.pending_response.send(super::OutgoingResponse { + result: Ok(b"this is a response".to_vec()), + reputation_changes: Vec::new(), + }); + } + }.boxed().into()).unwrap(); + + let protocol_config = ProtocolConfig { + name: From::from(protocol_name), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx), }; - let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); - let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); - - Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); - (swarm, listen_addr) + build_swarm(iter::once(protocol_config)) }) .collect::>(); @@ -719,99 +940,83 @@ mod tests { Swarm::dial_addr(&mut swarms[0].0, dial_addr).unwrap(); } - // Running `swarm[0]` in the background until a `InboundRequest` event happens, - // which is a hint about the test having ended. - async_std::task::spawn({ + // Running `swarm[0]` in the background. + pool.spawner().spawn_obj({ let (mut swarm, _) = swarms.remove(0); async move { loop { match swarm.next_event().await { - SwarmEvent::Behaviour(super::Event::InboundRequest { result, .. }) => { - assert!(result.is_ok()); - break + SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { + result.unwrap(); }, _ => {} } } - } - }); + }.boxed().into() + }).unwrap(); // Remove and run the remaining swarm. let (mut swarm, _) = swarms.remove(0); - async_std::task::block_on(async move { - let mut sent_request_id = None; + pool.run_until(async move { + let mut response_receiver = None; loop { match swarm.next_event().await { SwarmEvent::ConnectionEstablished { peer_id, .. } => { - let id = swarm.send_request( + let (sender, receiver) = oneshot::channel(); + swarm.send_request( &peer_id, protocol_name, - b"this is a request".to_vec() - ).unwrap(); - assert!(sent_request_id.is_none()); - sent_request_id = Some(id); + b"this is a request".to_vec(), + sender, + IfDisconnected::ImmediateError, + ); + assert!(response_receiver.is_none()); + response_receiver = Some(receiver); } - SwarmEvent::Behaviour(super::Event::RequestFinished { - request_id, - result, + SwarmEvent::Behaviour(Event::RequestFinished { + result, .. 
}) => { - assert_eq!(Some(request_id), sent_request_id); - let result = result.unwrap(); - assert_eq!(result, b"this is a response"); + result.unwrap(); break; } _ => {} } } + + assert_eq!(response_receiver.unwrap().await.unwrap().unwrap(), b"this is a response"); }); } #[test] fn max_response_size_exceeded() { - let protocol_name = "/test/req-rep/1"; + let protocol_name = "/test/req-resp/1"; + let mut pool = LocalPool::new(); // Build swarms whose behaviour is `RequestResponsesBehaviour`. let mut swarms = (0..2) .map(|_| { - let keypair = Keypair::generate_ed25519(); - - let noise_keys = noise::Keypair::::new() - .into_authentic(&keypair) - .unwrap(); - - let transport = MemoryTransport - .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(libp2p::yamux::Config::default()) - .boxed(); - - let behaviour = { - let (tx, mut rx) = mpsc::channel(64); - - let b = super::RequestResponsesBehaviour::new(iter::once(super::ProtocolConfig { - name: From::from(protocol_name), - max_request_size: 1024, - max_response_size: 8, // <-- important for the test - request_timeout: Duration::from_secs(30), - inbound_queue: Some(tx), - })).unwrap(); - - async_std::task::spawn(async move { - while let Some(rq) = rx.next().await { - assert_eq!(rq.payload, b"this is a request"); - let _ = rq.pending_response.send(b"this response exceeds the limit".to_vec()); - } - }); - - b + let (tx, mut rx) = mpsc::channel::(64); + + pool.spawner().spawn_obj(async move { + while let Some(rq) = rx.next().await { + assert_eq!(rq.payload, b"this is a request"); + let _ = rq.pending_response.send(super::OutgoingResponse { + result: Ok(b"this response exceeds the limit".to_vec()), + reputation_changes: Vec::new(), + }); + } + }.boxed().into()).unwrap(); + + let protocol_config = ProtocolConfig { + name: From::from(protocol_name), + max_request_size: 1024, + max_response_size: 8, // <-- important for the test + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx), }; - let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); - let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); - - Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); - (swarm, listen_addr) + build_swarm(iter::once(protocol_config)) }) .collect::>(); @@ -824,51 +1029,206 @@ mod tests { // Running `swarm[0]` in the background until a `InboundRequest` event happens, // which is a hint about the test having ended. - async_std::task::spawn({ + pool.spawner().spawn_obj({ let (mut swarm, _) = swarms.remove(0); async move { loop { match swarm.next_event().await { - SwarmEvent::Behaviour(super::Event::InboundRequest { result, .. }) => { + SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { assert!(result.is_ok()); break }, _ => {} } } - } - }); + }.boxed().into() + }).unwrap(); // Remove and run the remaining swarm. let (mut swarm, _) = swarms.remove(0); - async_std::task::block_on(async move { - let mut sent_request_id = None; + pool.run_until(async move { + let mut response_receiver = None; loop { match swarm.next_event().await { SwarmEvent::ConnectionEstablished { peer_id, .. 
} => { - let id = swarm.send_request( + let (sender, receiver) = oneshot::channel(); + swarm.send_request( &peer_id, protocol_name, - b"this is a request".to_vec() - ).unwrap(); - assert!(sent_request_id.is_none()); - sent_request_id = Some(id); + b"this is a request".to_vec(), + sender, + IfDisconnected::ImmediateError, + ); + assert!(response_receiver.is_none()); + response_receiver = Some(receiver); } - SwarmEvent::Behaviour(super::Event::RequestFinished { - request_id, - result, + SwarmEvent::Behaviour(Event::RequestFinished { + result, .. }) => { - assert_eq!(Some(request_id), sent_request_id); - match result { - Err(super::RequestFailure::Network(super::OutboundFailure::ConnectionClosed)) => {}, - _ => panic!() - } + assert!(result.is_err()); break; } _ => {} } } + + match response_receiver.unwrap().await.unwrap().unwrap_err() { + RequestFailure::Network(OutboundFailure::ConnectionClosed) => {}, + _ => panic!() + } }); } + + /// A [`RequestId`] is a unique identifier among either all inbound or all outbound requests for + /// a single [`RequestResponse`] behaviour. It is not guaranteed to be unique across multiple + /// [`RequestResponse`] behaviours. Thus when handling [`RequestId`] in the context of multiple + /// [`RequestResponse`] behaviours, one needs to couple the protocol name with the [`RequestId`] + /// to get a unique request identifier. + /// + /// This test ensures that two requests on different protocols can be handled concurrently + /// without a [`RequestId`] collision. + /// + /// See [`ProtocolRequestId`] for additional information. + #[test] + fn request_id_collision() { + let protocol_name_1 = "/test/req-resp-1/1"; + let protocol_name_2 = "/test/req-resp-2/1"; + let mut pool = LocalPool::new(); + + let mut swarm_1 = { + let protocol_configs = vec![ + ProtocolConfig { + name: From::from(protocol_name_1), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: None, + }, + ProtocolConfig { + name: From::from(protocol_name_2), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: None, + }, + ]; + + build_swarm(protocol_configs.into_iter()).0 + }; + + let (mut swarm_2, mut swarm_2_handler_1, mut swarm_2_handler_2, listen_add_2) = { + let (tx_1, rx_1) = mpsc::channel(64); + let (tx_2, rx_2) = mpsc::channel(64); + + let protocol_configs = vec![ + ProtocolConfig { + name: From::from(protocol_name_1), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx_1), + }, + ProtocolConfig { + name: From::from(protocol_name_2), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx_2), + }, + ]; + + let (swarm, listen_addr) = build_swarm(protocol_configs.into_iter()); + + (swarm, rx_1, rx_2, listen_addr) + }; + + // Ask swarm 1 to dial swarm 2. There isn't any discovery mechanism in place in this test, + // so they wouldn't connect to each other. + Swarm::dial_addr(&mut swarm_1, listen_add_2).unwrap(); + + // Run swarm 2 in the background, receiving two requests. + pool.spawner().spawn_obj( + async move { + loop { + match swarm_2.next_event().await { + SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { + result.unwrap(); + }, + _ => {} + } + } + }.boxed().into() + ).unwrap(); + + // Handle both requests sent by swarm 1 to swarm 2 in the background. 
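// The doc comment above stresses that a `RequestId` is only unique within a single
// request-response protocol. A minimal sketch of the kind of composite key this implies,
// in the spirit of the `ProtocolRequestId` it references (names below are illustrative;
// assumes `std::borrow::Cow`, `std::collections::HashMap` and libp2p's `RequestId` are in scope):
#[derive(Clone, PartialEq, Eq, Hash)]
struct ProtocolRequestIdSketch {
    protocol: Cow<'static, str>,
    request_id: RequestId,
}

fn track_pending(
    pending: &mut HashMap<ProtocolRequestIdSketch, oneshot::Sender<Vec<u8>>>,
    protocol: Cow<'static, str>,
    request_id: RequestId,
    response_tx: oneshot::Sender<Vec<u8>>,
) {
    // Requests that share a `RequestId` but belong to different protocols do not collide.
    pending.insert(ProtocolRequestIdSketch { protocol, request_id }, response_tx);
}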
+ // + // Make sure both requests overlap, by answering the first only after receiving the + // second. + pool.spawner().spawn_obj(async move { + let protocol_1_request = swarm_2_handler_1.next().await; + let protocol_2_request = swarm_2_handler_2.next().await; + + protocol_1_request.unwrap() + .pending_response + .send(OutgoingResponse { + result: Ok(b"this is a response".to_vec()), + reputation_changes: Vec::new(), + }) + .unwrap(); + protocol_2_request.unwrap() + .pending_response + .send(OutgoingResponse { + result: Ok(b"this is a response".to_vec()), + reputation_changes: Vec::new(), + }) + .unwrap(); + }.boxed().into()).unwrap(); + + // Have swarm 1 send two requests to swarm 2 and await responses. + pool.run_until( + async move { + let mut response_receivers = None; + let mut num_responses = 0; + + loop { + match swarm_1.next_event().await { + SwarmEvent::ConnectionEstablished { peer_id, .. } => { + let (sender_1, receiver_1) = oneshot::channel(); + let (sender_2, receiver_2) = oneshot::channel(); + swarm_1.send_request( + &peer_id, + protocol_name_1, + b"this is a request".to_vec(), + sender_1, + IfDisconnected::ImmediateError, + ); + swarm_1.send_request( + &peer_id, + protocol_name_2, + b"this is a request".to_vec(), + sender_2, + IfDisconnected::ImmediateError, + ); + assert!(response_receivers.is_none()); + response_receivers = Some((receiver_1, receiver_2)); + } + SwarmEvent::Behaviour(Event::RequestFinished { + result, .. + }) => { + num_responses += 1; + result.unwrap(); + if num_responses == 2 { + break; + } + } + _ => {} + } + } + let (response_receiver_1, response_receiver_2) = response_receivers.unwrap(); + assert_eq!(response_receiver_1.await.unwrap().unwrap(), b"this is a response"); + assert_eq!(response_receiver_2.await.unwrap().unwrap(), b"this is a response"); + } + ); + } } diff --git a/client/network/src/schema.rs b/client/network/src/schema.rs index 44fbbffd25406..d4572fca7594c 100644 --- a/client/network/src/schema.rs +++ b/client/network/src/schema.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,10 +20,11 @@ pub mod v1 { include!(concat!(env!("OUT_DIR"), "/api.v1.rs")); - pub mod finality { - include!(concat!(env!("OUT_DIR"), "/api.v1.finality.rs")); - } pub mod light { include!(concat!(env!("OUT_DIR"), "/api.v1.light.rs")); } } + +pub mod bitswap { + include!(concat!(env!("OUT_DIR"), "/bitswap.message.rs")); +} diff --git a/client/network/src/schema/bitswap.v1.2.0.proto b/client/network/src/schema/bitswap.v1.2.0.proto new file mode 100644 index 0000000000000..a4138b516d63d --- /dev/null +++ b/client/network/src/schema/bitswap.v1.2.0.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; + +package bitswap.message; + +message Message { + message Wantlist { + enum WantType { + Block = 0; + Have = 1; + } + + message Entry { + bytes block = 1; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) + int32 priority = 2; // the priority (normalized). default to 1 + bool cancel = 3; // whether this revokes an entry + WantType wantType = 4; // Note: defaults to enum 0, ie Block + bool sendDontHave = 5; // Note: defaults to false + } + + repeated Entry entries = 1; // a list of wantlist entries + bool full = 2; // whether this is the full wantlist. 
default to false + } + + message Block { + bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length) + bytes data = 2; + } + + enum BlockPresenceType { + Have = 0; + DontHave = 1; + } + message BlockPresence { + bytes cid = 1; + BlockPresenceType type = 2; + } + + Wantlist wantlist = 1; + repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 + repeated Block payload = 3; // used to send Blocks in bitswap 1.1.0 + repeated BlockPresence blockPresences = 4; + int32 pendingBytes = 5; +} diff --git a/client/network/src/schema/finality.v1.proto b/client/network/src/schema/finality.v1.proto deleted file mode 100644 index 843bc4eca0990..0000000000000 --- a/client/network/src/schema/finality.v1.proto +++ /dev/null @@ -1,19 +0,0 @@ -// Schema definition for finality proof request/responses. - -syntax = "proto3"; - -package api.v1.finality; - -// Request a finality proof from a peer. -message FinalityProofRequest { - // SCALE-encoded hash of the block to request. - bytes block_hash = 1; - // Opaque chain-specific additional request data. - bytes request = 2; -} - -// Response to a finality proof request. -message FinalityProofResponse { - // Opaque chain-specific finality proof. Empty if no such proof exists. - bytes proof = 1; // optional -} diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 93abbbad02495..39eaa606d0066 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -30,7 +30,7 @@ use crate::{ ExHashT, NetworkStateInfo, NetworkStatus, behaviour::{self, Behaviour, BehaviourOut}, - config::{parse_str_addr, NonReservedPeerMode, Params, Role, TransportConfig}, + config::{parse_str_addr, Params, Role, TransportConfig}, DhtEvent, discovery::DiscoveryConfig, error::Error, @@ -38,30 +38,56 @@ use crate::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, on_demand_layer::AlwaysBadChecker, - light_client_handler, block_requests, finality_requests, - protocol::{self, event::Event, NotifsHandlerError, LegacyConnectionKillError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol}, + light_client_requests, + protocol::{ + self, + NotifsHandlerError, + NotificationsSink, + PeerInfo, + Protocol, + Ready, + event::Event, + sync::SyncState, + }, transport, ReputationChange, + bitswap::Bitswap, }; use futures::{channel::oneshot, prelude::*}; use libp2p::{PeerId, multiaddr, Multiaddr}; -use libp2p::core::{ConnectedPoint, Executor, connection::{ConnectionError, PendingConnectionError}, either::EitherError}; +use libp2p::core::{ + ConnectedPoint, + Executor, + connection::{ + ConnectionLimits, + ConnectionError, + PendingConnectionError + }, + either::EitherError, + upgrade +}; use libp2p::kad::record; use libp2p::ping::handler::PingFailure; -use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent, protocols_handler::NodeHandlerWrapperError}; +use libp2p::swarm::{ + AddressScore, + NetworkBehaviour, + SwarmBuilder, + SwarmEvent, + protocols_handler::NodeHandlerWrapperError +}; use log::{error, info, trace, warn}; use metrics::{Metrics, MetricSources, Histogram, HistogramVec}; use parking_lot::Mutex; use sc_peerset::PeersetHandle; use 
sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; -use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, - ConsensusEngineId, -}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ borrow::Cow, + cmp, collections::{HashMap, HashSet}, + convert::TryFrom as _, fs, + iter, marker::PhantomData, num:: NonZeroUsize, pin::Pin, @@ -72,9 +98,8 @@ use std::{ }, task::Poll, }; -use wasm_timer::Instant; -pub use behaviour::{ResponseFailure, InboundFailure, RequestFailure, OutboundFailure}; +pub use behaviour::{ResponseFailure, InboundFailure, RequestFailure, OutboundFailure, IfDisconnected}; mod metrics; mod out_events; @@ -100,9 +125,7 @@ pub struct NetworkService { to_worker: TracingUnboundedSender>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Updated by the [`NetworkWorker`]. - peers_notifications_sinks: Arc>>, - /// For each legacy gossiping engine ID, the corresponding new protocol name. - protocol_name_by_engine: Mutex>>, + peers_notifications_sinks: Arc), NotificationsSink>>>, /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. notifications_sizes_metric: Option, @@ -128,9 +151,15 @@ impl NetworkWorker { ¶ms.network_config.transport, )?; ensure_addresses_consistent_with_transport( - params.network_config.reserved_nodes.iter().map(|x| &x.multiaddr), + params.network_config.default_peers_set.reserved_nodes.iter().map(|x| &x.multiaddr), ¶ms.network_config.transport, )?; + for extra_set in ¶ms.network_config.extra_sets { + ensure_addresses_consistent_with_transport( + extra_set.set_config.reserved_nodes.iter().map(|x| &x.multiaddr), + ¶ms.network_config.transport, + )?; + } ensure_addresses_consistent_with_transport( params.network_config.public_addresses.iter(), ¶ms.network_config.transport, @@ -138,12 +167,35 @@ impl NetworkWorker { let (to_worker, from_service) = tracing_unbounded("mpsc_network_worker"); - if let Some(path) = params.network_config.net_config_path { - fs::create_dir_all(&path)?; + if let Some(path) = ¶ms.network_config.net_config_path { + fs::create_dir_all(path)?; } + // Private and public keys configuration. + let local_identity = params.network_config.node_key.clone().into_keypair()?; + let local_public = local_identity.public(); + let local_peer_id = local_public.clone().into_peer_id(); + info!( + target: "sub-libp2p", + "🏷 Local node identity is: {}", + local_peer_id.to_base58(), + ); + + let (protocol, peerset_handle, mut known_addresses) = Protocol::new( + protocol::ProtocolConfig { + roles: From::from(¶ms.role), + max_parallel_downloads: params.network_config.max_parallel_downloads, + }, + params.chain.clone(), + params.transaction_pool, + params.protocol_id.clone(), + ¶ms.role, + ¶ms.network_config, + params.block_announce_validator, + params.metrics_registry.as_ref(), + )?; + // List of multiaddresses that we know in the network. - let mut known_addresses = Vec::new(); let mut bootnodes = Vec::new(); let mut boot_node_ids = HashSet::new(); @@ -173,71 +225,21 @@ impl NetworkWorker { } )?; - // Initialize the peers we should always be connected to. 
- let priority_groups = { - let mut reserved_nodes = HashSet::new(); - for reserved in params.network_config.reserved_nodes.iter() { - reserved_nodes.insert(reserved.peer_id.clone()); - known_addresses.push((reserved.peer_id.clone(), reserved.multiaddr.clone())); - } - - let print_deprecated_message = match ¶ms.role { - Role::Sentry { .. } => true, - Role::Authority { sentry_nodes } if !sentry_nodes.is_empty() => true, - _ => false, - }; - if print_deprecated_message { - log::warn!( - "🙇 Sentry nodes are deprecated, and the `--sentry` and `--sentry-nodes` \ - CLI options will eventually be removed in a future version. The Substrate \ - and Polkadot networking protocol require validators to be \ - publicly-accessible. Please do not block access to your validator nodes. \ - For details, see https://github.com/paritytech/substrate/issues/6845." - ); - } - - let mut sentries_and_validators = HashSet::new(); - match ¶ms.role { - Role::Sentry { validators } => { - for validator in validators { - sentries_and_validators.insert(validator.peer_id.clone()); - reserved_nodes.insert(validator.peer_id.clone()); - known_addresses.push((validator.peer_id.clone(), validator.multiaddr.clone())); - } - } - Role::Authority { sentry_nodes } => { - for sentry_node in sentry_nodes { - sentries_and_validators.insert(sentry_node.peer_id.clone()); - reserved_nodes.insert(sentry_node.peer_id.clone()); - known_addresses.push((sentry_node.peer_id.clone(), sentry_node.multiaddr.clone())); - } - } - _ => {} - } - - vec![ - ("reserved".to_owned(), reserved_nodes), - ("sentries_and_validators".to_owned(), sentries_and_validators), - ] - }; - - let peerset_config = sc_peerset::PeersetConfig { - in_peers: params.network_config.in_peers, - out_peers: params.network_config.out_peers, - bootnodes, - reserved_only: params.network_config.non_reserved_mode == NonReservedPeerMode::Deny, - priority_groups, + // Print a message about the deprecation of sentry nodes. + let print_deprecated_message = match ¶ms.role { + Role::Sentry { .. } => true, + Role::Authority { sentry_nodes } if !sentry_nodes.is_empty() => true, + _ => false, }; - - // Private and public keys configuration. - let local_identity = params.network_config.node_key.clone().into_keypair()?; - let local_public = local_identity.public(); - let local_peer_id = local_public.clone().into_peer_id(); - info!( - target: "sub-libp2p", - "🏷 Local node identity is: {}", - local_peer_id.to_base58(), - ); + if print_deprecated_message { + log::warn!( + "🙇 Sentry nodes are deprecated, and the `--sentry` and `--sentry-nodes` \ + CLI options will eventually be removed in a future version. The Substrate \ + and Polkadot networking protocol require validators to be \ + publicly-accessible. Please do not block access to your validator nodes. \ + For details, see https://github.com/paritytech/substrate/issues/6845." 
+ ); + } let checker = params.on_demand.as_ref() .map(|od| od.checker().clone()) @@ -245,42 +247,19 @@ impl NetworkWorker { let num_connected = Arc::new(AtomicUsize::new(0)); let is_major_syncing = Arc::new(AtomicBool::new(false)); - let (protocol, peerset_handle) = Protocol::new( - protocol::ProtocolConfig { - roles: From::from(¶ms.role), - max_parallel_downloads: params.network_config.max_parallel_downloads, - }, - local_peer_id.clone(), - params.chain.clone(), - params.transaction_pool, - params.finality_proof_request_builder, - params.protocol_id.clone(), - peerset_config, - params.block_announce_validator, - params.metrics_registry.as_ref(), - boot_node_ids.clone(), - )?; // Build the swarm. + let client = params.chain.clone(); let (mut swarm, bandwidth): (Swarm, _) = { let user_agent = format!( "{} ({})", params.network_config.client_version, params.network_config.node_name ); - let block_requests = { - let config = block_requests::Config::new(¶ms.protocol_id); - block_requests::BlockRequests::new(config, params.chain.clone()) - }; - let finality_proof_requests = { - let config = finality_requests::Config::new(¶ms.protocol_id); - finality_requests::FinalityProofRequests::new(config, params.finality_proof_provider.clone()) - }; - let light_client_handler = { - let config = light_client_handler::Config::new(¶ms.protocol_id); - light_client_handler::LightClientHandler::new( - config, - params.chain, + + let light_client_request_sender = { + light_client_requests::sender::LightClientRequestSender::new( + ¶ms.protocol_id, checker, peerset_handle.clone(), ) @@ -289,8 +268,9 @@ impl NetworkWorker { let discovery_config = { let mut config = DiscoveryConfig::new(local_public.clone()); config.with_user_defined(known_addresses); - config.discovery_limit(u64::from(params.network_config.out_peers) + 15); + config.discovery_limit(u64::from(params.network_config.default_peers_set.out_peers) + 15); config.add_protocol(params.protocol_id.clone()); + config.with_dht_random_walk(params.network_config.enable_dht_random_walk); config.allow_non_globals_in_dht(params.network_config.allow_non_globals_in_dht); config.use_kademlia_disjoint_query_paths(params.network_config.kademlia_disjoint_query_paths); @@ -308,16 +288,65 @@ impl NetworkWorker { config }; - let mut behaviour = { + let (transport, bandwidth) = { + let (config_mem, config_wasm) = match params.network_config.transport { + TransportConfig::MemoryOnly => (true, None), + TransportConfig::Normal { wasm_external_transport, .. } => + (false, wasm_external_transport) + }; + + // The yamux buffer size limit is configured to be equal to the maximum frame size + // of all protocols. 10 bytes are added to each limit for the length prefix that + // is not included in the upper layer protocols limit but is still present in the + // yamux buffer. These 10 bytes correspond to the maximum size required to encode + // a variable-length-encoding 64bits number. In other words, we make the + // assumption that no notification larger than 2^64 will ever be sent. 
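// Worked example of the sizing rule described above, with hypothetical limits that are
// not part of this change: for a 1 KiB request limit, a 16 MiB response limit and a
// 1 MiB extra notification set, the largest possible frame is 16 MiB, so the yamux
// receive buffer becomes 16 * 1024 * 1024 + 10 = 16_777_226 bytes, the extra 10 bytes
// being the worst-case varint length prefix mentioned above.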
+ let yamux_maximum_buffer_size = { + let requests_max = params.network_config + .request_response_protocols.iter() + .map(|cfg| usize::try_from(cfg.max_request_size).unwrap_or(usize::max_value())); + let responses_max = params.network_config + .request_response_protocols.iter() + .map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::max_value())); + let notifs_max = params.network_config + .extra_sets.iter() + .map(|cfg| usize::try_from(cfg.max_notification_size).unwrap_or(usize::max_value())); + + // A "default" max is added to cover all the other protocols: ping, identify, + // kademlia, block announces, and transactions. + let default_max = cmp::max( + 1024 * 1024, + usize::try_from(protocol::BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE) + .unwrap_or(usize::max_value()) + ); + + iter::once(default_max) + .chain(requests_max).chain(responses_max).chain(notifs_max) + .max().expect("iterator known to always yield at least one element; qed") + .saturating_add(10) + }; + + transport::build_transport( + local_identity, + config_mem, + config_wasm, + params.network_config.yamux_window_size, + yamux_maximum_buffer_size + ) + }; + + let behaviour = { + let bitswap = if params.network_config.ipfs_server { Some(Bitswap::new(client)) } else { None }; let result = Behaviour::new( protocol, params.role, user_agent, local_public, - block_requests, - finality_proof_requests, - light_client_handler, + light_client_request_sender, discovery_config, + params.block_request_protocol_config, + bitswap, + params.light_client_request_protocol_config, params.network_config.request_response_protocols, ); @@ -331,19 +360,12 @@ impl NetworkWorker { } }; - for (engine_id, protocol_name) in ¶ms.network_config.notifications_protocols { - behaviour.register_notifications_protocol(*engine_id, protocol_name.clone()); - } - let (transport, bandwidth) = { - let (config_mem, config_wasm) = match params.network_config.transport { - TransportConfig::MemoryOnly => (true, None), - TransportConfig::Normal { wasm_external_transport, .. } => - (false, wasm_external_transport) - }; - transport::build_transport(local_identity, config_mem, config_wasm) - }; let mut builder = SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) - .peer_connection_limit(crate::MAX_CONNECTIONS_PER_PEER) + .connection_limits(ConnectionLimits::default() + .with_max_established_per_peer(Some(crate::MAX_CONNECTIONS_PER_PEER as u32)) + .with_max_established_incoming(Some(crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING)) + ) + .substream_upgrade_protocol_override(upgrade::Version::V1Lazy) .notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed")) .connection_event_buffer_size(1024); if let Some(spawner) = params.executor { @@ -379,14 +401,11 @@ impl NetworkWorker { // Add external addresses. 
for addr in ¶ms.network_config.public_addresses { - Swarm::::add_external_address(&mut swarm, addr.clone()); + Swarm::::add_external_address(&mut swarm, addr.clone(), AddressScore::Infinite); } let external_addresses = Arc::new(Mutex::new(Vec::new())); let peers_notifications_sinks = Arc::new(Mutex::new(HashMap::new())); - let protocol_name_by_engine = Mutex::new({ - params.network_config.notifications_protocols.iter().cloned().collect() - }); let service = Arc::new(NetworkService { bandwidth, @@ -397,7 +416,6 @@ impl NetworkWorker { local_peer_id, to_worker, peers_notifications_sinks: peers_notifications_sinks.clone(), - protocol_name_by_engine, notifications_sizes_metric: metrics.as_ref().map(|metrics| metrics.notifications_sizes.clone()), _marker: PhantomData, @@ -416,7 +434,6 @@ impl NetworkWorker { peers_notifications_sinks, metrics, boot_node_ids, - pending_requests: HashMap::with_capacity(128), }) } @@ -499,11 +516,9 @@ impl NetworkWorker { self.network_service.user_protocol_mut().on_block_finalized(hash, &header); } - /// This should be called when blocks are added to the - /// chain by something other than the import queue. - /// Currently this is only useful for tests. - pub fn update_chain(&mut self) { - self.network_service.user_protocol_mut().update_chain(); + /// Inform the network service about new best imported block. + pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { + self.network_service.user_protocol_mut().new_best_block_imported(hash, number); } /// Returns the local `PeerId`. @@ -545,8 +560,6 @@ impl NetworkWorker { version_string: swarm.node(peer_id) .and_then(|i| i.client_version().map(|s| s.to_owned())), latest_ping_time: swarm.node(peer_id).and_then(|i| i.latest_ping()), - enabled: swarm.user_protocol().is_enabled(&peer_id), - open: swarm.user_protocol().is_open(&peer_id), known_addresses, })) }).collect() @@ -568,10 +581,17 @@ impl NetworkWorker { .collect() }; + let peer_id = Swarm::::local_peer_id(&swarm).to_base58(); + let listened_addresses = Swarm::::listeners(&swarm).cloned().collect(); + let external_addresses = Swarm::::external_addresses(&swarm) + .map(|r| &r.addr) + .cloned() + .collect(); + NetworkState { - peer_id: Swarm::::local_peer_id(&swarm).to_base58(), - listened_addresses: Swarm::::listeners(&swarm).cloned().collect(), - external_addresses: Swarm::::external_addresses(&swarm).cloned().collect(), + peer_id, + listened_addresses, + external_addresses, connected_peers, not_connected_peers, peerset: swarm.user_protocol_mut().peerset_debug_info(), @@ -609,7 +629,9 @@ impl NetworkService { /// Need a better solution to manage authorized peers, but now just use reserved peers for /// prototyping. pub fn set_authorized_peers(&self, peers: HashSet) { - self.peerset.set_reserved_peers(peers) + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); } /// Set authorized_only flag. @@ -617,7 +639,9 @@ impl NetworkService { /// Need a better solution to decide authorized_only, but now just use reserved_only flag for /// prototyping. pub fn set_authorized_only(&self, reserved_only: bool) { - self.peerset.set_reserved_only(reserved_only) + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); } /// Appends a notification to the buffer of pending outgoing notifications with the given peer. @@ -637,43 +661,43 @@ impl NetworkService { /// > between the remote voluntarily closing a substream or a network error /// > preventing the message from being delivered. 
/// - /// The protocol must have been registered with `register_notifications_protocol` or + /// The protocol must have been registered with /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). /// - pub fn write_notification(&self, target: PeerId, engine_id: ConsensusEngineId, message: Vec) { + pub fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec) { // We clone the `NotificationsSink` in order to be able to unlock the network-wide // `peers_notifications_sinks` mutex as soon as possible. let sink = { let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, engine_id)) { + if let Some(sink) = peers_notifications_sinks.get(&(target.clone(), protocol.clone())) { sink.clone() } else { // Notification silently discarded, as documented. + log::debug!( + target: "sub-libp2p", + "Attempted to send notification on missing or closed substream: {}, {:?}", + target, protocol, + ); return; } }; - // Used later for the metrics report. - let message_len = message.len(); - - // Determine the wire protocol name corresponding to this `engine_id`. - let protocol_name = self.protocol_name_by_engine.lock().get(&engine_id).cloned(); - if let Some(protocol_name) = protocol_name { - sink.send_sync_notification(protocol_name, message); - } else { - log::error!( - target: "sub-libp2p", - "Attempted to send notification on unknown protocol: {:?}", - engine_id, - ); - return; - } - if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { notifications_sizes_metric - .with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) - .observe(message_len as f64); + .with_label_values(&["out", &protocol]) + .observe(message.len() as f64); } + + // Sending is communicated to the `NotificationsSink`. + trace!( + target: "sub-libp2p", + "External API => Notification({:?}, {:?}, {} bytes)", + target, + protocol, + message.len() + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); + sink.send_sync_notification(message); } /// Obtains a [`NotificationSender`] for a connected peer, if it exists. @@ -698,7 +722,7 @@ impl NetworkService { /// return an error. It is however possible for the entire connection to be abruptly closed, /// in which case enqueued notifications will be lost. /// - /// The protocol must have been registered with `register_notifications_protocol` or + /// The protocol must have been registered with /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). /// /// # Usage @@ -746,31 +770,27 @@ impl NetworkService { pub fn notification_sender( &self, target: PeerId, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, ) -> Result { // We clone the `NotificationsSink` in order to be able to unlock the network-wide // `peers_notifications_sinks` mutex as soon as possible. let sink = { let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, engine_id)) { + if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { sink.clone() } else { return Err(NotificationSenderError::Closed); } }; - // Determine the wire protocol name corresponding to this `engine_id`. 
- let protocol_name = match self.protocol_name_by_engine.lock().get(&engine_id).cloned() { - Some(p) => p, - None => return Err(NotificationSenderError::BadProtocol), - }; + let notification_size_metric = self.notifications_sizes_metric.as_ref().map(|histogram| { + histogram.with_label_values(&["out", &protocol]) + }); Ok(NotificationSender { sink, - protocol_name, - notification_size_metric: self.notifications_sizes_metric.as_ref().map(|histogram| { - histogram.with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) - }), + protocol_name: protocol, + notification_size_metric, }) } @@ -796,9 +816,10 @@ impl NetworkService { /// notifications should remain the default ways of communicating information. For example, a /// peer can announce something through a notification, after which the recipient can obtain /// more information by performing a request. - /// As such, this function is meant to be called only with peers we are already connected to. - /// Calling this method with a `target` we are not connected to will *not* attempt to connect - /// to said peer. + /// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way you + /// will get an error immediately for disconnected peers, instead of waiting for a potentially very + /// long connection attempt, which would suggest that something is wrong anyway, as you are + /// supposed to be connected because of the notification protocol. /// /// No limit or throttling of concurrent outbound requests per peer and protocol are enforced. /// Such restrictions, if desired, need to be enforced at the call site(s). @@ -810,15 +831,12 @@ impl NetworkService { &self, target: PeerId, protocol: impl Into>, - request: Vec + request: Vec, + connect: IfDisconnected, ) -> Result, RequestFailure> { let (tx, rx) = oneshot::channel(); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { - target, - protocol: protocol.into(), - request, - pending_response: tx - }); + + self.start_request(target, protocol, request, tx, connect); match rx.await { Ok(v) => v, @@ -829,29 +847,29 @@ impl NetworkService { } } - /// Registers a new notifications protocol. + /// Variation of `request` which starts a request whose response is delivered on a provided channel. /// - /// After a protocol has been registered, you can call `write_notifications`. + /// Instead of blocking and waiting for a reply, this function returns immediately, sending + /// responses via the passed in sender. This alternative API exists to make it easier to + /// integrate with message passing APIs. /// - /// **Important**: This method is a work-around, and you are instead strongly encouraged to - /// pass the protocol in the `NetworkConfiguration::notifications_protocols` list instead. - /// If you have no other choice but to use this method, you are very strongly encouraged to - /// call it very early on. Any connection open will retain the protocols that were registered - /// then, and not any new one. - /// - /// Please call `event_stream` before registering a protocol, otherwise you may miss events - /// about the protocol that you have registered. - // TODO: remove this method after https://github.com/paritytech/substrate/issues/4587 - pub fn register_notifications_protocol( + /// Keep in mind that the connected receiver might receive a `Canceled` event in case of a + /// closing connection. This is expected behaviour. With `request` you would get a + /// `RequestFailure::Network(OutboundFailure::ConnectionClosed)` in that case. 
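// A minimal usage sketch of the two request APIs described above (the peer, protocol
// name and payload are placeholders; `oneshot` and `IfDisconnected` are the types
// already imported in this module):
async fn query_peer<B: BlockT + 'static, H: ExHashT>(
    service: &NetworkService<B, H>,
    peer: PeerId,
) {
    // Awaiting variant: resolves with the response or a `RequestFailure`.
    let _reply = service
        .request(peer.clone(), "/example/req-resp/1", b"ping".to_vec(), IfDisconnected::ImmediateError)
        .await;

    // Channel variant: returns immediately; the response, or a `Canceled` error if the
    // connection closes, is delivered on `rx` as documented above.
    let (tx, rx) = oneshot::channel();
    service.start_request(peer, "/example/req-resp/1", b"ping".to_vec(), tx, IfDisconnected::ImmediateError);
    let _ = rx.await;
}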
+ pub fn start_request( &self, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, + target: PeerId, + protocol: impl Into>, + request: Vec, + tx: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, ) { - let protocol_name = protocol_name.into(); - self.protocol_name_by_engine.lock().insert(engine_id, protocol_name.clone()); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RegisterNotifProtocol { - engine_id, - protocol_name, + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { + target, + protocol: protocol.into(), + request, + pending_response: tx, + connect, }); } @@ -875,7 +893,7 @@ impl NetworkService { /// /// In chain-based consensus, we often need to make sure non-best forks are /// at least temporarily synced. This function forces such an announcement. - pub fn announce_block(&self, hash: B::Hash, data: Vec) { + pub fn announce_block(&self, hash: B::Hash, data: Option>) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); } @@ -888,8 +906,12 @@ impl NetworkService { /// Disconnect from a node as soon as possible. /// /// This triggers the same effects as if the connection had closed itself spontaneously. - pub fn disconnect_peer(&self, who: PeerId) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who)); + /// + /// See also [`NetworkService::remove_from_peers_set`], which has the same effect but also + /// prevents the local node from re-establishing an outgoing substream to this peer until it + /// is added again. + pub fn disconnect_peer(&self, who: PeerId, protocol: impl Into>) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol.into())); } /// Request a justification for the given block from the network. @@ -927,19 +949,19 @@ impl NetworkService { .unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); } - /// Connect to unreserved peers and allow unreserved peers to connect. + /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. pub fn accept_unreserved_peers(&self) { - self.peerset.set_reserved_only(false); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(false)); } - /// Disconnect from unreserved peers and deny new unreserved peers to connect. + /// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing + /// purposes. pub fn deny_unreserved_peers(&self) { - self.peerset.set_reserved_only(true); - } - - /// Removes a `PeerId` from the list of reserved peers. - pub fn remove_reserved_peer(&self, peer: PeerId) { - self.peerset.remove_reserved_peer(peer); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(true)); } /// Adds a `PeerId` and its address as reserved. The string should encode the address @@ -953,87 +975,133 @@ impl NetworkService { if peer_id == self.local_peer_id { return Err("Local peer ID cannot be added as a reserved peer.".to_string()) } - self.peerset.add_reserved_peer(peer_id.clone()); + let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr)); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddReserved(peer_id)); Ok(()) } - /// Configure an explicit fork sync request. - /// Note that this function should not be used for recent blocks. - /// Sync should be able to download all the recent forks normally. 
- /// `set_sync_fork_request` should only be used if external code detects that there's - /// a stale fork missing. - /// Passing empty `peers` set effectively removes the sync request. - pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + /// Removes a `PeerId` from the list of reserved peers. + pub fn remove_reserved_peer(&self, peer_id: PeerId) { let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + .unbounded_send(ServiceToWorkerMsg::RemoveReserved(peer_id)); } - /// Modify a peerset priority group. + /// Add peers to a peer set. /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). - // - // NOTE: even though this function is currently sync, it's marked as async for - // future-proofing, see https://github.com/paritytech/substrate/pull/7247#discussion_r502263451. - pub async fn set_priority_group(&self, group_id: String, peers: HashSet) -> Result<(), String> { + pub fn add_peers_to_reserved_set(&self, protocol: Cow<'static, str>, peers: HashSet) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; - let peer_ids = peers.iter().map(|(peer_id, _addr)| peer_id.clone()).collect(); - - self.peerset.set_priority_group(group_id, peer_ids); - for (peer_id, addr) in peers.into_iter() { + // Make sure the local peer ID is never added to the PSM. + if peer_id == self.local_peer_id { + return Err("Local peer ID cannot be added as a reserved peer.".to_string()) + } + + if !addr.is_empty() { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr)); + } let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + .unbounded_send(ServiceToWorkerMsg::AddSetReserved(protocol.clone(), peer_id)); } Ok(()) } - /// Add peers to a peerset priority group. + /// Remove peers from a peer set. /// /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). // - // NOTE: even though this function is currently sync, it's marked as async for - // future-proofing, see https://github.com/paritytech/substrate/pull/7247#discussion_r502263451. - pub async fn add_to_priority_group(&self, group_id: String, peers: HashSet) -> Result<(), String> { + // NOTE: technically, this function only needs `Vec`, but we use `Multiaddr` here for convenience. + pub fn remove_peers_from_reserved_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet + ) -> Result<(), String> { + let peers = self.split_multiaddr_and_peer_id(peers)?; + for (peer_id, _) in peers.into_iter() { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::RemoveSetReserved(protocol.clone(), peer_id)); + } + Ok(()) + } + + /// Configure an explicit fork sync request. + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// `set_sync_fork_request` should only be used if external code detects that there's + /// a stale fork missing. + /// Passing empty `peers` set effectively removes the sync request. 
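// A small sketch of the reserved-set calls documented above (assumes a `NetworkService`
// handle and `std::collections::HashSet`; the protocol name is illustrative, and `addr`
// must end in a `/p2p/<peer id>` component as these methods require):
fn reserve_peer<B: BlockT + 'static, H: ExHashT>(
    service: &NetworkService<B, H>,
    addr: Multiaddr,
) -> Result<(), String> {
    let peers: HashSet<Multiaddr> = std::iter::once(addr).collect();
    // Mark the peer as reserved for this notifications protocol...
    service.add_peers_to_reserved_set("/example/notifications/1".into(), peers.clone())?;
    // ...and later release it again.
    service.remove_peers_from_reserved_set("/example/notifications/1".into(), peers)
}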
+ pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + } + + /// Add a peer to a set of peers. + /// + /// If the set has slots available, it will try to open a substream with this peer. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + pub fn add_to_peers_set(&self, protocol: Cow<'static, str>, peers: HashSet) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; for (peer_id, addr) in peers.into_iter() { - self.peerset.add_to_priority_group(group_id.clone(), peer_id.clone()); + // Make sure the local peer ID is never added to the PSM. + if peer_id == self.local_peer_id { + return Err("Local peer ID cannot be added as a reserved peer.".to_string()) + } + if !addr.is_empty() { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr)); + } let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + .unbounded_send(ServiceToWorkerMsg::AddToPeersSet(protocol.clone(), peer_id)); } Ok(()) } - /// Remove peers from a peerset priority group. + /// Remove peers from a peer set. + /// + /// If we currently have an open substream with this peer, it will soon be closed. /// /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). // - // NOTE: even though this function is currently sync, it's marked as async for - // future-proofing, see https://github.com/paritytech/substrate/pull/7247#discussion_r502263451. // NOTE: technically, this function only needs `Vec`, but we use `Multiaddr` here for convenience. - pub async fn remove_from_priority_group(&self, group_id: String, peers: HashSet) -> Result<(), String> { + pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: HashSet) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; for (peer_id, _) in peers.into_iter() { - self.peerset.remove_from_priority_group(group_id.clone(), peer_id); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::RemoveFromPeersSet(protocol.clone(), peer_id)); } Ok(()) } @@ -1043,24 +1111,14 @@ impl NetworkService { self.num_connected.load(Ordering::Relaxed) } - /// This function should be called when blocks are added to the chain by something other - /// than the import queue. - /// - /// > **Important**: This function is a hack and can be removed at any time. Do **not** use it. - pub fn update_chain(&self) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::UpdateChain); - } - - /// Inform the network service about an own imported block. - pub fn own_block_imported(&self, hash: B::Hash, number: NumberFor) { + /// Inform the network service about new best imported block. + pub fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::OwnBlockImported(hash, number)); + .unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number)); } - /// Utility function to extract `PeerId` from each `Multiaddr` for priority group updates. 
+ /// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates. /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). @@ -1076,7 +1134,7 @@ impl NetworkService { // Make sure the local peer ID is never added to the PSM // or added as a "known address", even if given. if peer == self.local_peer_id { - Err("Local peer ID in priority group.".to_string()) + Err("Local peer ID in peer set.".to_string()) } else { Ok((peer, addr)) } @@ -1142,10 +1200,12 @@ impl NotificationSender { /// Returns a future that resolves when the `NotificationSender` is ready to send a notification. pub async fn ready<'a>(&'a self) -> Result, NotificationSenderError> { Ok(NotificationSenderReady { - ready: match self.sink.reserve_notification(self.protocol_name.clone()).await { + ready: match self.sink.reserve_notification().await { Ok(r) => r, Err(()) => return Err(NotificationSenderError::Closed), }, + peer_id: self.sink.peer_id(), + protocol_name: &self.protocol_name, notification_size_metric: self.notification_size_metric.clone(), }) } @@ -1156,6 +1216,12 @@ impl NotificationSender { pub struct NotificationSenderReady<'a> { ready: Ready<'a>, + /// Target of the notification. + peer_id: &'a PeerId, + + /// Name of the protocol on the wire. + protocol_name: &'a Cow<'static, str>, + /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. notification_size_metric: Option, @@ -1170,6 +1236,15 @@ impl<'a> NotificationSenderReady<'a> { notification_size_metric.observe(notification.len() as f64); } + trace!( + target: "sub-libp2p", + "External API => Notification({:?}, {}, {} bytes)", + self.peer_id, + self.protocol_name, + notification.len() + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Async notification", self.peer_id); + self.ready .send(notification) .map_err(|()| NotificationSenderError::Closed) @@ -1196,10 +1271,18 @@ enum ServiceToWorkerMsg { PropagateTransaction(H), PropagateTransactions, RequestJustification(B::Hash, NumberFor), - AnnounceBlock(B::Hash, Vec), + AnnounceBlock(B::Hash, Option>), GetValue(record::Key), PutValue(record::Key, Vec), AddKnownAddress(PeerId, Multiaddr), + SetReservedOnly(bool), + AddReserved(PeerId), + RemoveReserved(PeerId), + SetReserved(HashSet), + AddSetReserved(Cow<'static, str>, PeerId), + RemoveSetReserved(Cow<'static, str>, PeerId), + AddToPeersSet(Cow<'static, str>, PeerId), + RemoveFromPeersSet(Cow<'static, str>, PeerId), SyncFork(Vec, B::Hash, NumberFor), EventStream(out_events::Sender), Request { @@ -1207,14 +1290,10 @@ enum ServiceToWorkerMsg { protocol: Cow<'static, str>, request: Vec, pending_response: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, }, - RegisterNotifProtocol { - engine_id: ConsensusEngineId, - protocol_name: Cow<'static, str>, - }, - DisconnectPeer(PeerId), - UpdateChain, - OwnBlockImported(B::Hash, NumberFor), + DisconnectPeer(PeerId, Cow<'static, str>), + NewBestBlockImported(B::Hash, NumberFor), } /// Main network worker. Must be polled in order for the network to advance. @@ -1237,23 +1316,16 @@ pub struct NetworkWorker { /// Messages from the [`NetworkService`] that must be processed. from_service: TracingUnboundedReceiver>, /// Receiver for queries from the light client that must be processed. - light_client_rqs: Option>>, + light_client_rqs: Option>>, /// Senders for events that happen on the network. 
event_streams: out_events::OutChannels, /// Prometheus network metrics. metrics: Option, /// The `PeerId`'s of all boot nodes. boot_node_ids: Arc>, - /// Requests started using [`NetworkService::request`]. Includes the channel to send back the - /// response, when the request has started, and the name of the protocol for diagnostic - /// purposes. - pending_requests: HashMap< - behaviour::RequestId, - (oneshot::Sender, RequestFailure>>, Instant, String) - >, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. - peers_notifications_sinks: Arc>>, + peers_notifications_sinks: Arc), NotificationsSink>>>, } impl Future for NetworkWorker { @@ -1270,10 +1342,14 @@ impl Future for NetworkWorker { // Check for new incoming light client requests. if let Some(light_client_rqs) = this.light_client_rqs.as_mut() { while let Poll::Ready(Some(rq)) = light_client_rqs.poll_next_unpin(cx) { - // This can error if there are too many queued requests already. - if this.network_service.light_client_request(rq).is_err() { - log::warn!("Couldn't start light client request: too many pending requests"); + let result = this.network_service.light_client_request(rq); + match result { + Ok(()) => {}, + Err(light_client_requests::sender::SendRequestError::TooManyRequests) => { + log::warn!("Couldn't start light client request: too many pending requests"); + } } + if let Some(metrics) = this.metrics.as_ref() { metrics.issued_light_requests.inc(); } @@ -1316,47 +1392,35 @@ impl Future for NetworkWorker { this.network_service.get_value(&key), ServiceToWorkerMsg::PutValue(key, value) => this.network_service.put_value(key, value), + ServiceToWorkerMsg::SetReservedOnly(reserved_only) => + this.network_service.user_protocol_mut().set_reserved_only(reserved_only), + ServiceToWorkerMsg::SetReserved(peers) => + this.network_service.user_protocol_mut().set_reserved_peers(peers), + ServiceToWorkerMsg::AddReserved(peer_id) => + this.network_service.user_protocol_mut().add_reserved_peer(peer_id), + ServiceToWorkerMsg::RemoveReserved(peer_id) => + this.network_service.user_protocol_mut().remove_reserved_peer(peer_id), + ServiceToWorkerMsg::AddSetReserved(protocol, peer_id) => + this.network_service.user_protocol_mut().add_set_reserved_peer(protocol, peer_id), + ServiceToWorkerMsg::RemoveSetReserved(protocol, peer_id) => + this.network_service.user_protocol_mut().remove_set_reserved_peer(protocol, peer_id), ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => this.network_service.add_known_address(peer_id, addr), + ServiceToWorkerMsg::AddToPeersSet(protocol, peer_id) => + this.network_service.user_protocol_mut().add_to_peers_set(protocol, peer_id), + ServiceToWorkerMsg::RemoveFromPeersSet(protocol, peer_id) => + this.network_service.user_protocol_mut().remove_from_peers_set(protocol, peer_id), ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) => this.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), ServiceToWorkerMsg::EventStream(sender) => this.event_streams.push(sender), - ServiceToWorkerMsg::Request { target, protocol, request, pending_response } => { - // Calling `send_request` can fail immediately in some circumstances. - // This is handled by sending back an error on the channel. 
- match this.network_service.send_request(&target, &protocol, request) { - Ok(request_id) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.requests_out_started_total - .with_label_values(&[&protocol]) - .inc(); - } - this.pending_requests.insert( - request_id, - (pending_response, Instant::now(), protocol.to_string()) - ); - }, - Err(behaviour::SendRequestError::NotConnected) => { - let err = RequestFailure::Network(OutboundFailure::ConnectionClosed); - let _ = pending_response.send(Err(err)); - }, - Err(behaviour::SendRequestError::UnknownProtocol) => { - let err = RequestFailure::Network(OutboundFailure::UnsupportedProtocols); - let _ = pending_response.send(Err(err)); - }, - } + ServiceToWorkerMsg::Request { target, protocol, request, pending_response, connect } => { + this.network_service.send_request(&target, &protocol, request, pending_response, connect); }, - ServiceToWorkerMsg::RegisterNotifProtocol { engine_id, protocol_name } => { - this.network_service - .register_notifications_protocol(engine_id, protocol_name); - }, - ServiceToWorkerMsg::DisconnectPeer(who) => - this.network_service.user_protocol_mut().disconnect_peer(&who), - ServiceToWorkerMsg::UpdateChain => - this.network_service.user_protocol_mut().update_chain(), - ServiceToWorkerMsg::OwnBlockImported(hash, number) => - this.network_service.user_protocol_mut().own_block_imported(hash, number), + ServiceToWorkerMsg::DisconnectPeer(who, protocol_name) => + this.network_service.user_protocol_mut().disconnect_peer(&who, &protocol_name), + ServiceToWorkerMsg::NewBestBlockImported(hash, number) => + this.network_service.user_protocol_mut().new_best_block_imported(hash, number), } } @@ -1389,12 +1453,6 @@ impl Future for NetworkWorker { } this.import_queue.import_justification(origin, hash, nb, justification); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::FinalityProofImport(origin, hash, nb, proof))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.import_queue_finality_proofs_submitted.inc(); - } - this.import_queue.import_finality_proof(origin, hash, nb, proof); - }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. 
})) => { if let Some(metrics) = this.metrics.as_ref() { match result { @@ -1405,10 +1463,11 @@ impl Future for NetworkWorker { } Err(err) => { let reason = match err { - ResponseFailure::Busy => "busy", ResponseFailure::Network(InboundFailure::Timeout) => "timeout", ResponseFailure::Network(InboundFailure::UnsupportedProtocols) => "unsupported", + ResponseFailure::Network(InboundFailure::ResponseOmission) => + "busy-omitted", ResponseFailure::Network(InboundFailure::ConnectionClosed) => "connection-closed", }; @@ -1420,51 +1479,37 @@ impl Future for NetworkWorker { } } }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RequestFinished { request_id, result })) => { - if let Some((send_back, started, protocol)) = this.pending_requests.remove(&request_id) { - if let Some(metrics) = this.metrics.as_ref() { - match &result { - Ok(_) => { - metrics.requests_out_success_total - .with_label_values(&[&protocol]) - .observe(started.elapsed().as_secs_f64()); - } - Err(err) => { - let reason = match err { - RequestFailure::Refused => "refused", - RequestFailure::Network(OutboundFailure::DialFailure) => - "dial-failure", - RequestFailure::Network(OutboundFailure::Timeout) => - "timeout", - RequestFailure::Network(OutboundFailure::ConnectionClosed) => - "connection-closed", - RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => - "unsupported", - }; - - metrics.requests_out_failure_total - .with_label_values(&[&protocol, reason]) - .inc(); - } + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RequestFinished { + protocol, duration, result, .. + })) => { + if let Some(metrics) = this.metrics.as_ref() { + match result { + Ok(_) => { + metrics.requests_out_success_total + .with_label_values(&[&protocol]) + .observe(duration.as_secs_f64()); + } + Err(err) => { + let reason = match err { + RequestFailure::NotConnected => "not-connected", + RequestFailure::UnknownProtocol => "unknown-protocol", + RequestFailure::Refused => "refused", + RequestFailure::Obsolete => "obsolete", + RequestFailure::Network(OutboundFailure::DialFailure) => + "dial-failure", + RequestFailure::Network(OutboundFailure::Timeout) => + "timeout", + RequestFailure::Network(OutboundFailure::ConnectionClosed) => + "connection-closed", + RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => + "unsupported", + }; + + metrics.requests_out_failure_total + .with_label_values(&[&protocol, reason]) + .inc(); } } - let _ = send_back.send(result); - } else { - error!("Request not in pending_requests"); - } - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::OpaqueRequestStarted { protocol, .. })) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.requests_out_started_total - .with_label_values(&[&protocol]) - .inc(); - } - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::OpaqueRequestFinished { protocol, request_duration, .. 
})) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.requests_out_success_total - .with_label_values(&[&protocol]) - .observe(request_duration.as_secs_f64()); } }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted(protocol))) => { @@ -1474,24 +1519,28 @@ impl Future for NetworkWorker { .inc(); } }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, engine_id, notifications_sink, role })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { + remote, protocol, notifications_sink, role + })) => { if let Some(metrics) = this.metrics.as_ref() { metrics.notifications_streams_opened_total - .with_label_values(&[&maybe_utf8_bytes_to_string(&engine_id)]).inc(); + .with_label_values(&[&protocol]).inc(); } { let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); - peers_notifications_sinks.insert((remote.clone(), engine_id), notifications_sink); + peers_notifications_sinks.insert((remote.clone(), protocol.clone()), notifications_sink); } this.event_streams.send(Event::NotificationStreamOpened { remote, - engine_id, + protocol, role, }); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { remote, engine_id, notifications_sink })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { + remote, protocol, notifications_sink + })) => { let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); - if let Some(s) = peers_notifications_sinks.get_mut(&(remote, engine_id)) { + if let Some(s) = peers_notifications_sinks.get_mut(&(remote, protocol)) { *s = notifications_sink; } else { log::error!( @@ -1513,33 +1562,33 @@ impl Future for NetworkWorker { // https://github.com/paritytech/substrate/issues/6403. 
/*this.event_streams.send(Event::NotificationStreamClosed { remote, - engine_id, + protocol, }); this.event_streams.send(Event::NotificationStreamOpened { remote, - engine_id, + protocol, role, });*/ }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, engine_id })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, protocol })) => { if let Some(metrics) = this.metrics.as_ref() { metrics.notifications_streams_closed_total - .with_label_values(&[&maybe_utf8_bytes_to_string(&engine_id[..])]).inc(); + .with_label_values(&[&protocol[..]]).inc(); } this.event_streams.send(Event::NotificationStreamClosed { remote: remote.clone(), - engine_id, + protocol: protocol.clone(), }); { let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); - peers_notifications_sinks.remove(&(remote.clone(), engine_id)); + peers_notifications_sinks.remove(&(remote.clone(), protocol)); } }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { remote, messages })) => { if let Some(metrics) = this.metrics.as_ref() { - for (engine_id, message) in &messages { + for (protocol, message) in &messages { metrics.notifications_sizes - .with_label_values(&["in", &maybe_utf8_bytes_to_string(engine_id)]) + .with_label_values(&["in", protocol]) .observe(message.len() as f64); } } @@ -1548,6 +1597,12 @@ impl Future for NetworkWorker { messages, }); }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::SyncConnected(remote))) => { + this.event_streams.send(Event::SyncConnected { remote }); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::SyncDisconnected(remote))) => { + this.event_streams.send(Event::SyncDisconnected { remote }); + }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Dht(event, duration))) => { if let Some(metrics) = this.metrics.as_ref() { let query_type = match event { @@ -1587,14 +1642,11 @@ impl Future for NetworkWorker { let reason = match cause { Some(ConnectionError::IO(_)) => "transport-error", Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::A(EitherError::B( - EitherError::A(PingFailure::Timeout)))))))))) => "ping-timeout", + EitherError::A(EitherError::B(EitherError::A( + PingFailure::Timeout)))))))) => "ping-timeout", Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::A(EitherError::A( - NotifsHandlerError::Legacy(LegacyConnectionKillError)))))))))) => "force-closed", - Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::A(EitherError::A( - NotifsHandlerError::SyncNotificationsClogged))))))))) => "sync-notifications-clogged", + EitherError::A(EitherError::A( + NotifsHandlerError::SyncNotificationsClogged))))))) => "sync-notifications-clogged", Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(_))) => "protocol-error", Some(ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout)) => "keep-alive-timeout", None => "actively-closed", @@ -1714,7 +1766,10 @@ impl Future for NetworkWorker { // Update the variables shared with the `NetworkService`. 
this.num_connected.store(num_connected_peers, Ordering::Relaxed); { - let external_addresses = Swarm::::external_addresses(&this.network_service).cloned().collect(); + let external_addresses = Swarm::::external_addresses(&this.network_service) + .map(|r| &r.addr) + .cloned() + .collect(); *this.external_addresses.lock() = external_addresses; } @@ -1741,7 +1796,9 @@ impl Future for NetworkWorker { } metrics.peerset_num_discovered.set(this.network_service.user_protocol().num_discovered_peers() as u64); metrics.peerset_num_requested.set(this.network_service.user_protocol().requested_peers().count() as u64); - metrics.pending_connections.set(Swarm::network_info(&this.network_service).num_connections_pending as u64); + metrics.pending_connections.set( + Swarm::network_info(&this.network_service).connection_counters().num_pending() as u64 + ); } Poll::Pending @@ -1751,17 +1808,6 @@ impl Future for NetworkWorker { impl Unpin for NetworkWorker { } -/// Turns bytes that are potentially UTF-8 into a reasonable representable string. -/// -/// Meant to be used only for debugging or metrics-reporting purposes. -pub(crate) fn maybe_utf8_bytes_to_string(id: &[u8]) -> Cow { - if let Ok(s) = std::str::from_utf8(&id[..]) { - Cow::Borrowed(s) - } else { - Cow::Owned(format!("{:?}", id)) - } -} - /// The libp2p swarm, customized for our needs. type Swarm = libp2p::swarm::Swarm>; @@ -1780,33 +1826,11 @@ impl<'a, B: BlockT, H: ExHashT> Link for NetworkLink<'a, B, H> { self.protocol.user_protocol_mut().on_blocks_processed(imported, count, results) } fn justification_imported(&mut self, who: PeerId, hash: &B::Hash, number: NumberFor, success: bool) { - self.protocol.user_protocol_mut().justification_import_result(hash.clone(), number, success); - if !success { - info!("💔 Invalid justification provided by {} for #{}", who, hash); - self.protocol.user_protocol_mut().disconnect_peer(&who); - self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid justification")); - } + self.protocol.user_protocol_mut().justification_import_result(who, hash.clone(), number, success); } fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { self.protocol.user_protocol_mut().request_justification(hash, number) } - fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - self.protocol.user_protocol_mut().request_finality_proof(hash, number) - } - fn finality_proof_imported( - &mut self, - who: PeerId, - request_block: (B::Hash, NumberFor), - finalization_result: Result<(B::Hash, NumberFor), ()>, - ) { - let success = finalization_result.is_ok(); - self.protocol.user_protocol_mut().finality_proof_import_result(request_block, finalization_result); - if !success { - info!("💔 Invalid finality proof provided by {} for #{}", who, request_block.0); - self.protocol.user_protocol_mut().disconnect_peer(&who); - self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid finality proof")); - } - } } fn ensure_addresses_consistent_with_transport<'a>( diff --git a/client/network/src/service/metrics.rs b/client/network/src/service/metrics.rs index a63ce7a18a519..40d65ea45f111 100644 --- a/client/network/src/service/metrics.rs +++ b/client/network/src/service/metrics.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -56,7 +56,6 @@ pub struct Metrics { pub distinct_peers_connections_closed_total: Counter, pub distinct_peers_connections_opened_total: Counter, pub import_queue_blocks_submitted: Counter, - pub import_queue_finality_proofs_submitted: Counter, pub import_queue_justifications_submitted: Counter, pub incoming_connections_errors_total: CounterVec, pub incoming_connections_total: Counter, @@ -79,7 +78,6 @@ pub struct Metrics { pub requests_in_success_total: HistogramVec, pub requests_out_failure_total: CounterVec, pub requests_out_success_total: HistogramVec, - pub requests_out_started_total: CounterVec, } impl Metrics { @@ -112,10 +110,6 @@ impl Metrics { "import_queue_blocks_submitted", "Number of blocks submitted to the import queue.", )?, registry)?, - import_queue_finality_proofs_submitted: prometheus::register(Counter::new( - "import_queue_finality_proofs_submitted", - "Number of finality proofs submitted to the import queue.", - )?, registry)?, import_queue_justifications_submitted: prometheus::register(Counter::new( "import_queue_justifications_submitted", "Number of justifications submitted to the import queue.", @@ -235,7 +229,8 @@ impl Metrics { HistogramOpts { common_opts: Opts::new( "sub_libp2p_requests_in_success_total", - "Total number of requests received and answered" + "For successful incoming requests, time between receiving the request and \ + starting to send the response" ), buckets: prometheus::exponential_buckets(0.001, 2.0, 16) .expect("parameters are always valid values; qed"), @@ -253,20 +248,13 @@ impl Metrics { HistogramOpts { common_opts: Opts::new( "sub_libp2p_requests_out_success_total", - "For successful requests, time between a request's start and finish" + "For successful outgoing requests, time between a request's start and finish" ), buckets: prometheus::exponential_buckets(0.001, 2.0, 16) .expect("parameters are always valid values; qed"), }, &["protocol"] )?, registry)?, - requests_out_started_total: prometheus::register(CounterVec::new( - Opts::new( - "sub_libp2p_requests_out_started_total", - "Total number of requests emitted" - ), - &["protocol"] - )?, registry)?, }) } } diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 1b86a5fa4317d..06c068e369da7 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -33,7 +33,6 @@ //! use crate::Event; -use super::maybe_utf8_bytes_to_string; use futures::{prelude::*, channel::mpsc, ready, stream::FusedStream}; use parking_lot::Mutex; @@ -228,23 +227,33 @@ impl Metrics { .with_label_values(&["dht", "sent", name]) .inc_by(num); } - Event::NotificationStreamOpened { engine_id, .. } => { + Event::SyncConnected { .. } => { self.events_total - .with_label_values(&[&format!("notif-open-{:?}", engine_id), "sent", name]) + .with_label_values(&["sync-connected", "sent", name]) + .inc_by(num); + } + Event::SyncDisconnected { .. 
} => { + self.events_total + .with_label_values(&["sync-disconnected", "sent", name]) + .inc_by(num); + } + Event::NotificationStreamOpened { protocol, .. } => { + self.events_total + .with_label_values(&[&format!("notif-open-{:?}", protocol), "sent", name]) .inc_by(num); }, - Event::NotificationStreamClosed { engine_id, .. } => { + Event::NotificationStreamClosed { protocol, .. } => { self.events_total - .with_label_values(&[&format!("notif-closed-{:?}", engine_id), "sent", name]) + .with_label_values(&[&format!("notif-closed-{:?}", protocol), "sent", name]) .inc_by(num); }, Event::NotificationsReceived { messages, .. } => { - for (engine_id, message) in messages { + for (protocol, message) in messages { self.events_total - .with_label_values(&[&format!("notif-{:?}", engine_id), "sent", name]) + .with_label_values(&[&format!("notif-{:?}", protocol), "sent", name]) .inc_by(num); self.notifications_sizes - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id), "sent", name]) + .with_label_values(&[protocol, "sent", name]) .inc_by(num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::max_value()))); } }, @@ -258,23 +267,33 @@ impl Metrics { .with_label_values(&["dht", "received", name]) .inc(); } - Event::NotificationStreamOpened { engine_id, .. } => { + Event::SyncConnected { .. } => { + self.events_total + .with_label_values(&["sync-connected", "received", name]) + .inc(); + } + Event::SyncDisconnected { .. } => { + self.events_total + .with_label_values(&["sync-disconnected", "received", name]) + .inc(); + } + Event::NotificationStreamOpened { protocol, .. } => { self.events_total - .with_label_values(&[&format!("notif-open-{:?}", engine_id), "received", name]) + .with_label_values(&[&format!("notif-open-{:?}", protocol), "received", name]) .inc(); }, - Event::NotificationStreamClosed { engine_id, .. } => { + Event::NotificationStreamClosed { protocol, .. } => { self.events_total - .with_label_values(&[&format!("notif-closed-{:?}", engine_id), "received", name]) + .with_label_values(&[&format!("notif-closed-{:?}", protocol), "received", name]) .inc(); }, Event::NotificationsReceived { messages, .. } => { - for (engine_id, message) in messages { + for (protocol, message) in messages { self.events_total - .with_label_values(&[&format!("notif-{:?}", engine_id), "received", name]) + .with_label_values(&[&format!("notif-{:?}", protocol), "received", name]) .inc(); self.notifications_sizes - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id), "received", name]) + .with_label_values(&[&protocol, "received", name]) .inc_by(u64::try_from(message.len()).unwrap_or(u64::max_value())); } }, diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 4b6f9dd156482..f88854963fb95 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,11 +17,13 @@ // along with this program. If not, see . 
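Note on the API shift exercised by the test changes below: notification protocols are now identified by their full protocol name (a `Cow<'static, str>`) rather than a four-byte `ConsensusEngineId`, and they are registered as extra peer sets on the network configuration. A minimal sketch of the new shape, using only the `extra_sets`/`NonDefaultSetConfig` fields that appear in this diff (the helper itself is illustrative, not part of the change):

use std::borrow::Cow;
use sc_network::config::{NetworkConfiguration, NonDefaultSetConfig};

// Illustrative only: register a "/foo" notifications protocol as an extra peer set.
fn with_foo_protocol(mut net_config: NetworkConfiguration) -> NetworkConfiguration {
	net_config.extra_sets.push(NonDefaultSetConfig {
		// Keyed by the on-the-wire protocol name instead of a `ConsensusEngineId`.
		notifications_protocol: Cow::Borrowed("/foo"),
		// Upper bound for a single notification; 1 MiB mirrors the value used in the tests.
		max_notification_size: 1024 * 1024,
		// Reserved nodes, slot counts, etc. keep their defaults here.
		set_config: Default::default(),
	});
	net_config
}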
use crate::{config, Event, NetworkService, NetworkWorker}; +use crate::block_request_handler::BlockRequestHandler; +use crate::light_client_requests::handler::LightClientRequestHandler; use libp2p::PeerId; use futures::prelude::*; use sp_runtime::traits::{Block as BlockT, Header as _}; -use std::{sync::Arc, time::Duration}; +use std::{borrow::Cow, sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; type TestNetworkService = NetworkService< @@ -87,26 +89,45 @@ fn build_test_full_node(config: config::NetworkConfiguration) PassThroughVerifier(false), Box::new(client.clone()), None, - None, &sp_core::testing::TaskExecutor::new(), None, )); + let protocol_id = config::ProtocolId::from("/test-protocol-name"); + + let block_request_protocol_config = { + let (handler, protocol_config) = BlockRequestHandler::new( + &protocol_id, + client.clone(), + ); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let light_client_request_protocol_config = { + let (handler, protocol_config) = LightClientRequestHandler::new( + &protocol_id, + client.clone(), + ); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + let worker = NetworkWorker::new(config::Params { role: config::Role::Full, executor: None, network_config: config, chain: client.clone(), - finality_proof_provider: None, - finality_proof_request_builder: None, on_demand: None, transaction_pool: Arc::new(crate::config::EmptyTransactionPool), - protocol_id: config::ProtocolId::from("/test-protocol-name"), + protocol_id, import_queue, block_announce_validator: Box::new( sp_consensus::block_validation::DefaultBlockAnnounceValidator, ), metrics_registry: None, + block_request_protocol_config, + light_client_request_protocol_config, }) .unwrap(); @@ -121,29 +142,43 @@ fn build_test_full_node(config: config::NetworkConfiguration) (service, event_stream) } -const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; +const PROTOCOL_NAME: Cow<'static, str> = Cow::Borrowed("/foo"); /// Builds two nodes and their associated events stream. -/// The nodes are connected together and have the `ENGINE_ID` protocol registered. +/// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. fn build_nodes_one_proto() -> (Arc, impl Stream, Arc, impl Stream) { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + extra_sets: vec![ + config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + max_notification_size: 1024 * 1024, + set_config: Default::default() + } + ], listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, .. config::NetworkConfiguration::new_local() }); let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + extra_sets: vec![ + config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + .. 
Default::default() + } + } + ], listen_addresses: vec![], - reserved_nodes: vec![config::MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), - }], transport: config::TransportConfig::MemoryOnly, .. config::NetworkConfiguration::new_local() }); @@ -161,10 +196,10 @@ fn notifications_state_consistent() { // Write some initial notifications that shouldn't get through. for _ in 0..(rand::random::() % 5) { - node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node1.write_notification(node2.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); } for _ in 0..(rand::random::() % 5) { - node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node2.write_notification(node1.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); } async_std::task::block_on(async move { @@ -187,18 +222,18 @@ fn notifications_state_consistent() { // Start by sending a notification from node1 to node2 and vice-versa. Part of the // test consists in ensuring that notifications get ignored if the stream isn't open. if rand::random::() % 5 >= 3 { - node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node1.write_notification(node2.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); } if rand::random::() % 5 >= 3 { - node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node2.write_notification(node1.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); } // Also randomly disconnect the two nodes from time to time. if rand::random::() % 20 == 0 { - node1.disconnect_peer(node2.local_peer_id().clone()); + node1.disconnect_peer(node2.local_peer_id().clone(), PROTOCOL_NAME); } if rand::random::() % 20 == 0 { - node2.disconnect_peer(node1.local_peer_id().clone()); + node2.disconnect_peer(node1.local_peer_id().clone(), PROTOCOL_NAME); } // Grab next event from either `events_stream1` or `events_stream2`. @@ -219,31 +254,31 @@ fn notifications_state_consistent() { }; match next_event { - future::Either::Left(Event::NotificationStreamOpened { remote, engine_id, .. }) => { + future::Either::Left(Event::NotificationStreamOpened { remote, protocol, .. }) => { something_happened = true; assert!(!node1_to_node2_open); node1_to_node2_open = true; assert_eq!(remote, *node2.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); + assert_eq!(protocol, PROTOCOL_NAME); } - future::Either::Right(Event::NotificationStreamOpened { remote, engine_id, .. }) => { + future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. }) => { something_happened = true; assert!(!node2_to_node1_open); node2_to_node1_open = true; assert_eq!(remote, *node1.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); + assert_eq!(protocol, PROTOCOL_NAME); } - future::Either::Left(Event::NotificationStreamClosed { remote, engine_id, .. }) => { + future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. }) => { assert!(node1_to_node2_open); node1_to_node2_open = false; assert_eq!(remote, *node2.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); + assert_eq!(protocol, PROTOCOL_NAME); } - future::Either::Right(Event::NotificationStreamClosed { remote, engine_id, .. }) => { + future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. 
}) => { assert!(node2_to_node1_open); node2_to_node1_open = false; assert_eq!(remote, *node1.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); + assert_eq!(protocol, PROTOCOL_NAME); } future::Either::Left(Event::NotificationsReceived { remote, .. }) => { assert!(node1_to_node2_open); @@ -251,7 +286,7 @@ fn notifications_state_consistent() { if rand::random::() % 5 >= 4 { node1.write_notification( node2.local_peer_id().clone(), - ENGINE_ID, + PROTOCOL_NAME, b"hello world".to_vec() ); } @@ -262,13 +297,17 @@ fn notifications_state_consistent() { if rand::random::() % 5 >= 4 { node2.write_notification( node1.local_peer_id().clone(), - ENGINE_ID, + PROTOCOL_NAME, b"hello world".to_vec() ); } } // Add new events here. + future::Either::Left(Event::SyncConnected { .. }) => {} + future::Either::Right(Event::SyncConnected { .. }) => {} + future::Either::Left(Event::SyncDisconnected { .. }) => {} + future::Either::Right(Event::SyncDisconnected { .. }) => {} future::Either::Left(Event::Dht(_)) => {} future::Either::Right(Event::Dht(_)) => {} }; @@ -281,9 +320,17 @@ fn lots_of_incoming_peers_works() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (main_node, _) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], listen_addresses: vec![listen_addr.clone()], - in_peers: u32::max_value(), + extra_sets: vec![ + config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + in_peers: u32::max_value(), + .. Default::default() + }, + } + ], transport: config::TransportConfig::MemoryOnly, .. config::NetworkConfiguration::new_local() }); @@ -298,12 +345,20 @@ fn lots_of_incoming_peers_works() { let main_node_peer_id = main_node_peer_id.clone(); let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], listen_addresses: vec![], - reserved_nodes: vec![config::MultiaddrWithPeerId { - multiaddr: listen_addr.clone(), - peer_id: main_node_peer_id.clone(), - }], + extra_sets: vec![ + config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr.clone(), + peer_id: main_node_peer_id.clone(), + }], + .. Default::default() + }, + } + ], transport: config::TransportConfig::MemoryOnly, .. config::NetworkConfiguration::new_local() }); @@ -364,7 +419,7 @@ fn notifications_back_pressure() { Event::NotificationStreamClosed { .. } => panic!(), Event::NotificationsReceived { messages, .. } => { for message in messages { - assert_eq!(message.0, ENGINE_ID); + assert_eq!(message.0, PROTOCOL_NAME); assert_eq!(message.1, format!("hello #{}", received_notifications)); received_notifications += 1; } @@ -389,7 +444,7 @@ fn notifications_back_pressure() { // Sending! 
for num in 0..TOTAL_NOTIFS { - let notif = node1.notification_sender(node2_id.clone(), ENGINE_ID).unwrap(); + let notif = node1.notification_sender(node2_id.clone(), PROTOCOL_NAME).unwrap(); notif.ready().await.unwrap().send(format!("hello #{}", num)).unwrap(); } @@ -465,7 +520,10 @@ fn ensure_reserved_node_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, - reserved_nodes: vec![reserved_node], + default_peers_set: config::SetConfig { + reserved_nodes: vec![reserved_node], + .. Default::default() + }, .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -481,7 +539,10 @@ fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - reserved_nodes: vec![reserved_node], + default_peers_set: config::SetConfig { + reserved_nodes: vec![reserved_node], + .. Default::default() + }, .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index 80d897633fd72..12c82c0fcefd0 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,9 +17,9 @@ // along with this program. If not, see . use libp2p::{ - InboundUpgradeExt, OutboundUpgradeExt, PeerId, Transport, + PeerId, Transport, core::{ - self, either::{EitherOutput, EitherTransport}, muxing::StreamMuxerBox, + self, either::EitherTransport, muxing::StreamMuxerBox, transport::{Boxed, OptionalTransport}, upgrade }, mplex, identity, bandwidth, wasm_ext, noise @@ -35,12 +35,22 @@ pub use self::bandwidth::BandwidthSinks; /// If `memory_only` is true, then only communication within the same process are allowed. Only /// addresses with the format `/memory/...` are allowed. /// +/// `yamux_window_size` is the maximum size of the Yamux receive windows. `None` to leave the +/// default (256kiB). +/// +/// `yamux_maximum_buffer_size` is the maximum allowed size of the Yamux buffer. This should be +/// set either to the maximum of all the maximum allowed sizes of messages frames of all +/// high-level protocols combined, or to some generously high value if you are sure that a maximum +/// size is enforced on all high-level protocols. +/// /// Returns a `BandwidthSinks` object that allows querying the average bandwidth produced by all /// the connections spawned with this transport. pub fn build_transport( keypair: identity::Keypair, memory_only: bool, wasm_external_transport: Option, + yamux_window_size: Option, + yamux_maximum_buffer_size: usize, ) -> (Boxed<(PeerId, StreamMuxerBox)>, Arc) { // Build the base layer of the transport. 
let transport = if let Some(t) = wasm_external_transport { @@ -50,7 +60,7 @@ pub fn build_transport( }; #[cfg(not(target_os = "unknown"))] let transport = transport.or_transport(if !memory_only { - let desktop_trans = tcp::TcpConfig::new(); + let desktop_trans = tcp::TcpConfig::new().nodelay(true); let desktop_trans = websocket::WsConfig::new(desktop_trans.clone()) .or_transport(desktop_trans); OptionalTransport::some(if let Ok(dns) = dns::DnsConfig::new(desktop_trans.clone()) { @@ -74,11 +84,7 @@ pub fn build_transport( // For more information about these two panics, see in "On the Importance of // Checking Cryptographic Protocols for Faults" by Dan Boneh, Richard A. DeMillo, // and Richard J. Lipton. - let noise_keypair_legacy = noise::Keypair::::new().into_authentic(&keypair) - .expect("can only fail in case of a hardware bug; since this signing is performed only \ - once and at initialization, we're taking the bet that the inconvenience of a very \ - rare panic here is basically zero"); - let noise_keypair_spec = noise::Keypair::::new().into_authentic(&keypair) + let noise_keypair = noise::Keypair::::new().into_authentic(&keypair) .expect("can only fail in case of a hardware bug; since this signing is performed only \ once and at initialization, we're taking the bet that the inconvenience of a very \ rare panic here is basically zero"); @@ -87,35 +93,30 @@ pub fn build_transport( let mut noise_legacy = noise::LegacyConfig::default(); noise_legacy.recv_legacy_handshake = true; - let mut xx_config = noise::NoiseConfig::xx(noise_keypair_spec); + let mut xx_config = noise::NoiseConfig::xx(noise_keypair); xx_config.set_legacy_config(noise_legacy.clone()); - let mut ix_config = noise::NoiseConfig::ix(noise_keypair_legacy); - ix_config.set_legacy_config(noise_legacy); - - let extract_peer_id = |result| match result { - EitherOutput::First((peer_id, o)) => (peer_id, EitherOutput::First(o)), - EitherOutput::Second((peer_id, o)) => (peer_id, EitherOutput::Second(o)), - }; - - core::upgrade::SelectUpgrade::new(xx_config.into_authenticated(), ix_config.into_authenticated()) - .map_inbound(extract_peer_id) - .map_outbound(extract_peer_id) + xx_config.into_authenticated() }; let multiplexing_config = { let mut mplex_config = mplex::MplexConfig::new(); - mplex_config.max_buffer_len_behaviour(mplex::MaxBufferBehaviour::Block); - mplex_config.max_buffer_len(usize::MAX); + mplex_config.set_max_buffer_behaviour(mplex::MaxBufferBehaviour::Block); + mplex_config.set_max_buffer_size(usize::MAX); - let mut yamux_config = libp2p::yamux::Config::default(); + let mut yamux_config = libp2p::yamux::YamuxConfig::default(); // Enable proper flow-control: window updates are only sent when // buffered data has been consumed. 
- yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::OnRead); + yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); + yamux_config.set_max_buffer_size(yamux_maximum_buffer_size); + + if let Some(yamux_window_size) = yamux_window_size { + yamux_config.set_receive_window_size(yamux_window_size); + } core::upgrade::SelectUpgrade::new(yamux_config, mplex_config) }; - let transport = transport.upgrade(upgrade::Version::V1) + let transport = transport.upgrade(upgrade::Version::V1Lazy) .authenticate(authentication_config) .multiplex(multiplexing_config) .timeout(Duration::from_secs(20)) diff --git a/client/network/src/utils.rs b/client/network/src/utils.rs index 490e2ced38266..02673ef49fb4c 100644 --- a/client/network/src/utils.rs +++ b/client/network/src/utils.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . 
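The `build_transport` documentation above introduces two new knobs: `yamux_window_size` (leave it `None` for the default 256 KiB receive window) and `yamux_maximum_buffer_size`. A hedged sketch of how a caller might pick the latter from per-protocol message-size limits, following that doc comment; the helper name, the fallback constant, and the max-over-limits interpretation are assumptions for illustration, not code from this diff:

// Illustrative only: derive the Yamux buffer bound from the largest frame any high-level
// protocol is allowed to send, falling back to a generously high value otherwise.
fn derive_yamux_maximum_buffer_size(per_protocol_max_frame_sizes: &[usize]) -> usize {
	// Arbitrary "generously high" fallback for the sketch.
	const GENEROUS_FALLBACK: usize = 16 * 1024 * 1024;
	per_protocol_max_frame_sizes
		.iter()
		.copied()
		.max()
		.unwrap_or(GENEROUS_FALLBACK)
}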
use futures::{stream::unfold, FutureExt, Stream, StreamExt}; use futures_timer::Delay; diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index a8bf98a75ed61..5a799ad829410 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -13,23 +13,24 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-network = { version = "0.8.0", path = "../" } +async-std = "1.6.5" +sc-network = { version = "0.9.0", path = "../" } log = "0.4.8" -parking_lot = "0.10.0" -futures = "0.3.4" +parking_lot = "0.11.1" +futures = "0.3.9" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.29.1", default-features = false } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-block-builder = { version = "0.8.0", path = "../../block-builder" } -sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } +libp2p = { version = "0.34.0", default-features = false } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.9.0", path = "../../consensus/common" } +sc-client-api = { version = "3.0.0", path = "../../api" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sc-block-builder = { version = "0.9.0", path = "../../block-builder" } +sp-consensus-babe = { version = "0.9.0", path = "../../../primitives/consensus/babe" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } tempfile = "3.1.0" -sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } -sc-service = { version = "0.8.0", default-features = false, features = ["test-helpers"], path = "../../service" } +sp-tracing = { version = "3.0.0", path = "../../../primitives/tracing" } +sc-service = { version = "0.9.0", default-features = false, features = ["test-helpers"], path = "../../service" } diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 1d2cd3d687de9..4000e53420b4a 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -76,7 +76,7 @@ fn import_single_good_known_block_is_ignored() { block, &mut PassThroughVerifier::new(true) ) { - Ok(BlockImportResult::ImportedKnown(ref n)) if *n == number => {} + Ok(BlockImportResult::ImportedKnown(ref n, _)) if *n == number => {} _ => panic!() } } @@ -107,7 +107,6 @@ fn async_import_queue_drops() { verifier, Box::new(substrate_test_runtime_client::new()), None, - None, &executor, None, ); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 587feebe55c14..f523be857507f 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -29,7 +29,8 @@ use std::{ use libp2p::build_multiaddr; use log::trace; -use sc_network::config::FinalityProofProvider; +use sc_network::block_request_handler::{self, BlockRequestHandler}; +use sc_network::light_client_requests::{self, handler::LightClientRequestHandler}; use sp_blockchain::{ HeaderBackend, Result as ClientResult, well_known_cache_keys::{self, Id as CacheKeyId}, @@ -44,21 +45,25 @@ use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_network::config::Role; use sp_consensus::block_validation::{DefaultBlockAnnounceValidator, BlockAnnounceValidator}; use sp_consensus::import_queue::{ - BasicQueue, BoxJustificationImport, Verifier, BoxFinalityProofImport, + BasicQueue, BoxJustificationImport, Verifier, }; use sp_consensus::block_import::{BlockImport, ImportResult}; use sp_consensus::Error as ConsensusError; use sp_consensus::{BlockOrigin, ForkChoiceStrategy, BlockImportParams, BlockCheckParams, JustificationImport}; use futures::prelude::*; -use sc_network::{NetworkWorker, NetworkService, config::ProtocolId}; -use sc_network::config::{NetworkConfiguration, TransportConfig, BoxFinalityProofRequestBuilder}; +use futures::future::BoxFuture; +use sc_network::{ + NetworkWorker, NetworkService, config::{ProtocolId, MultiaddrWithPeerId, NonReservedPeerMode}, + Multiaddr, +}; +use sc_network::config::{NetworkConfiguration, NonDefaultSetConfig, TransportConfig}; use libp2p::PeerId; use parking_lot::Mutex; use sp_core::H256; use sc_network::config::ProtocolConfig; use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_runtime::{ConsensusEngineId, Justification}; +use sp_runtime::Justification; use substrate_test_runtime_client::{self, AccountKeyring}; use sc_service::client::Client; pub use sc_network::config::EmptyTransactionPool; @@ -227,6 +232,7 @@ pub struct Peer { network: NetworkWorker::Hash>, imported_blocks_stream: Pin> + Send>>, finality_notification_stream: Pin> + Send>>, + listen_addr: Multiaddr, } impl Peer { @@ -266,7 +272,7 @@ impl Peer { } /// Announces an important block on the network. 
- pub fn announce_block(&self, hash: ::Hash, data: Vec) { + pub fn announce_block(&self, hash: ::Hash, data: Option>) { self.network.service().announce_block(hash, data); } @@ -280,7 +286,7 @@ impl Peer { where F: FnMut(BlockBuilder) -> Block { let best_hash = self.client.info().best_hash; - self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false) + self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false, true, true) } /// Add blocks to the peer -- edit the block before adding. The chain will @@ -292,6 +298,8 @@ impl Peer { origin: BlockOrigin, mut edit_block: F, headers_only: bool, + inform_sync_about_new_best_block: bool, + announce_block: bool, ) -> H256 where F: FnMut(BlockBuilder) -> Block { let full_client = self.client.as_full() .expect("blocks could only be generated by full clients"); @@ -325,12 +333,18 @@ impl Peer { }; self.block_import.import_block(import_block, cache).expect("block_import failed"); - self.network.service().announce_block(hash, Vec::new()); + if announce_block { + self.network.service().announce_block(hash, None); + } at = hash; } - self.network.update_chain(); - self.network.service().announce_block(at.clone(), Vec::new()); + if inform_sync_about_new_best_block { + self.network.new_best_block_imported( + at, + full_client.header(&BlockId::Hash(at)).ok().flatten().unwrap().number().clone(), + ); + } at } @@ -343,18 +357,48 @@ impl Peer { /// Push blocks to the peer (simplified: with or without a TX) pub fn push_headers(&mut self, count: usize) -> H256 { let best_hash = self.client.info().best_hash; - self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true) + self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true, true, true) } /// Push blocks to the peer (simplified: with or without a TX) starting from /// given hash. pub fn push_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool) -> H256 { - self.generate_tx_blocks_at(at, count, with_tx, false) + self.generate_tx_blocks_at(at, count, with_tx, false, true, true) + } + + /// Push blocks to the peer (simplified: with or without a TX) starting from + /// given hash without informing the sync protocol about the new best block. + pub fn push_blocks_at_without_informing_sync( + &mut self, + at: BlockId, + count: usize, + with_tx: bool, + ) -> H256 { + self.generate_tx_blocks_at(at, count, with_tx, false, false, true) + } + + /// Push blocks to the peer (simplified: with or without a TX) starting from + /// given hash without announcing the block. + pub fn push_blocks_at_without_announcing( + &mut self, + at: BlockId, + count: usize, + with_tx: bool, + ) -> H256 { + self.generate_tx_blocks_at(at, count, with_tx, false, true, false) } /// Push blocks/headers to the peer (simplified: with or without a TX) starting from /// given hash. 
- fn generate_tx_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool, headers_only:bool) -> H256 { + fn generate_tx_blocks_at( + &mut self, + at: BlockId, + count: usize, + with_tx: bool, + headers_only: bool, + inform_sync_about_new_best_block: bool, + announce_block: bool, + ) -> H256 { let mut nonce = 0; if with_tx { self.generate_blocks_at( @@ -371,7 +415,9 @@ impl Peer { nonce = nonce + 1; builder.build().unwrap().block }, - headers_only + headers_only, + inform_sync_about_new_best_block, + announce_block, ) } else { self.generate_blocks_at( @@ -380,6 +426,8 @@ impl Peer { BlockOrigin::File, |builder| builder.build().unwrap().block, headers_only, + inform_sync_about_new_best_block, + announce_block, ) } } @@ -557,7 +605,11 @@ pub struct FullPeerConfig { /// Block announce validator. pub block_announce_validator: Option + Send + Sync>>, /// List of notification protocols that the network must support. - pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, str>)>, + pub notifications_protocols: Vec>, + /// The indices of the peers the peer should be connected to. + /// + /// If `None`, it will be connected to all other peers. + pub connect_to_peers: Option>, } pub trait TestNetFactory: Sized { @@ -586,20 +638,10 @@ pub trait TestNetFactory: Sized { -> ( BlockImportAdapter, Option>, - Option>, - Option>, Self::PeerData, ) { - (client.as_block_import(), None, None, None, Default::default()) - } - - /// Get finality proof provider (if supported). - fn make_finality_proof_provider( - &self, - _client: PeersClient, - ) -> Option>> { - None + (client.as_block_import(), None, Default::default()) } fn default_config() -> ProtocolConfig { @@ -636,8 +678,6 @@ pub trait TestNetFactory: Sized { let ( block_import, justification_import, - finality_proof_import, - finality_proof_request_builder, data, ) = self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); @@ -652,7 +692,6 @@ pub trait TestNetFactory: Sized { verifier.clone(), Box::new(block_import.clone()), justification_import, - finality_proof_import, &sp_core::testing::TaskExecutor::new(), None, )); @@ -668,29 +707,61 @@ pub trait TestNetFactory: Sized { network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; - network_config.notifications_protocols = config.notifications_protocols; + network_config.extra_sets = config.notifications_protocols.into_iter().map(|p| { + NonDefaultSetConfig { + notifications_protocol: p, + max_notification_size: 1024 * 1024, + set_config: Default::default() + } + }).collect(); + if let Some(connect_to) = config.connect_to_peers { + let addrs = connect_to.iter().map(|v| { + let peer_id = self.peer(*v).network_service().local_peer_id().clone(); + let multiaddr = self.peer(*v).listen_addr.clone(); + MultiaddrWithPeerId { peer_id, multiaddr } + }).collect(); + network_config.default_peers_set.reserved_nodes = addrs; + network_config.default_peers_set.non_reserved_mode = NonReservedPeerMode::Deny; + } + + let protocol_id = ProtocolId::from("test-protocol-name"); + + let block_request_protocol_config = { + let (handler, protocol_config) = BlockRequestHandler::new(&protocol_id, client.clone()); + self.spawn_task(handler.run().boxed()); + protocol_config + }; + + let light_client_request_protocol_config = { + let (handler, protocol_config) = LightClientRequestHandler::new(&protocol_id, client.clone()); + self.spawn_task(handler.run().boxed()); + protocol_config + }; let 
network = NetworkWorker::new(sc_network::config::Params { role: Role::Full, executor: None, network_config, chain: client.clone(), - finality_proof_provider: self.make_finality_proof_provider( - PeersClient::Full(client.clone(), backend.clone()), - ), - finality_proof_request_builder, on_demand: None, transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id: ProtocolId::from("test-protocol-name"), + protocol_id, import_queue, block_announce_validator: config.block_announce_validator .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)), metrics_registry: None, + block_request_protocol_config, + light_client_request_protocol_config, }).unwrap(); - self.mut_peers(|peers| { + trace!(target: "test_network", "Peer identifier: {}", network.service().local_peer_id()); + + self.mut_peers(move |peers| { for peer in peers.iter_mut() { - peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); + peer.network.add_known_address( + network.service().local_peer_id().clone(), + listen_addr.clone(), + ); } let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); @@ -706,6 +777,7 @@ pub trait TestNetFactory: Sized { block_import, verifier, network, + listen_addr, }); }); } @@ -717,8 +789,6 @@ pub trait TestNetFactory: Sized { let ( block_import, justification_import, - finality_proof_import, - finality_proof_request_builder, data, ) = self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); @@ -733,7 +803,6 @@ pub trait TestNetFactory: Sized { verifier.clone(), Box::new(block_import.clone()), justification_import, - finality_proof_import, &sp_core::testing::TaskExecutor::new(), None, )); @@ -750,21 +819,28 @@ pub trait TestNetFactory: Sized { network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; + let protocol_id = ProtocolId::from("test-protocol-name"); + + let block_request_protocol_config = block_request_handler::generate_protocol_config( + &protocol_id, + ); + + let light_client_request_protocol_config = + light_client_requests::generate_protocol_config(&protocol_id); + let network = NetworkWorker::new(sc_network::config::Params { role: Role::Light, executor: None, network_config, chain: client.clone(), - finality_proof_provider: self.make_finality_proof_provider( - PeersClient::Light(client.clone(), backend.clone()) - ), - finality_proof_request_builder, on_demand: None, transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id: ProtocolId::from("test-protocol-name"), + protocol_id, import_queue, block_announce_validator: Box::new(DefaultBlockAnnounceValidator), metrics_registry: None, + block_request_protocol_config, + light_client_request_protocol_config, }).unwrap(); self.mut_peers(|peers| { @@ -785,10 +861,16 @@ pub trait TestNetFactory: Sized { imported_blocks_stream, finality_notification_stream, network, + listen_addr, }); }); } + /// Used to spawn background tasks, e.g. the block request protocol handler. + fn spawn_task(&self, f: BoxFuture<'static, ()>) { + async_std::task::spawn(f); + } + /// Polls the testnet until all nodes are in sync. /// /// Must be executed in a task context. @@ -879,7 +961,7 @@ pub trait TestNetFactory: Sized { // We poll `imported_blocks_stream`. 
while let Poll::Ready(Some(notification)) = peer.imported_blocks_stream.as_mut().poll_next(cx) { - peer.network.service().announce_block(notification.hash, Vec::new()); + peer.network.service().announce_block(notification.hash, None); } // We poll `finality_notification_stream`, but we only take the last event. @@ -989,16 +1071,12 @@ impl TestNetFactory for JustificationTestNet { -> ( BlockImportAdapter, Option>, - Option>, - Option>, Self::PeerData, ) { ( client.as_block_import(), Some(Box::new(ForceFinalized(client))), - None, - None, Default::default(), ) } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 64985871d85e0..46fbb8f82d477 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -436,7 +436,7 @@ fn can_sync_small_non_best_forks() { assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); assert!(!net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - net.peer(0).announce_block(small_hash, Vec::new()); + net.peer(0).announce_block(small_hash, None); // after announcing, peer 1 downloads the block. @@ -452,7 +452,7 @@ fn can_sync_small_non_best_forks() { net.block_until_sync(); let another_fork = net.peer(0).push_blocks_at(BlockId::Number(35), 2, true); - net.peer(0).announce_block(another_fork, Vec::new()); + net.peer(0).announce_block(another_fork, None); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); if net.peer(1).client().header(&BlockId::Hash(another_fork)).unwrap().is_none() { @@ -500,7 +500,7 @@ fn light_peer_imports_header_from_announce() { sp_tracing::try_init_simple(); fn import_with_announce(net: &mut TestNet, hash: H256) { - net.peer(0).announce_block(hash, Vec::new()); + net.peer(0).announce_block(hash, None); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); @@ -610,7 +610,7 @@ fn does_not_sync_announced_old_best_block() { net.peer(0).push_blocks(18, true); net.peer(1).push_blocks(20, true); - net.peer(0).announce_block(old_hash, Vec::new()); + net.peer(0).announce_block(old_hash, None); block_on(futures::future::poll_fn::<(), _>(|cx| { // poll once to import announcement net.poll(cx); @@ -618,7 +618,7 @@ fn does_not_sync_announced_old_best_block() { })); assert!(!net.peer(1).is_major_syncing()); - net.peer(0).announce_block(old_hash_with_parent, Vec::new()); + net.peer(0).announce_block(old_hash_with_parent, None); block_on(futures::future::poll_fn::<(), _>(|cx| { // poll once to import announcement net.poll(cx); @@ -653,8 +653,8 @@ fn imports_stale_once() { fn import_with_announce(net: &mut TestNet, hash: H256) { // Announce twice - net.peer(0).announce_block(hash, Vec::new()); - net.peer(0).announce_block(hash, Vec::new()); + net.peer(0).announce_block(hash, None); + net.peer(0).announce_block(hash, None); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); @@ -702,7 +702,7 @@ fn can_sync_to_peers_with_wrong_common_block() { net.block_until_sync(); - assert!(net.peer(1).client().header(&BlockId::Hash(final_hash)).unwrap().is_some()); + assert!(net.peer(1).has_block(&final_hash)); } /// Returns `is_new_best = true` for each validated announcement. 
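Two `NetworkService` calls recur in the sync test changes that follow: `announce_block`, which now takes optional announcement data, and `new_best_block_imported`, which replaces the old `update_chain`/`own_block_imported` pair for informing the sync protocol about a new best block. A minimal sketch combining them, assuming only the method signatures that appear in this diff (the helper itself is illustrative):

use sc_network::{ExHashT, NetworkService};
use sp_runtime::traits::{Block as BlockT, NumberFor};

// Illustrative only: publish a freshly imported best block to the network.
fn publish_new_best<B: BlockT, H: ExHashT>(
	network: &NetworkService<B, H>,
	hash: B::Hash,
	number: NumberFor<B>,
	announce_data: Option<Vec<u8>>,
) {
	// `None` sends a plain announcement; `Some(bytes)` attaches data that remote
	// block-announce validators receive and that is kept when the block is re-announced.
	network.announce_block(hash, announce_data);
	// Without this call the handshake keeps advertising the previous best block, so peers
	// will not sync the new blocks from us (see the tests below).
	network.new_best_block_imported(hash, number);
}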
@@ -721,7 +721,6 @@ impl BlockAnnounceValidator for NewBestBlockAnnounceValidator {
 #[test]
 fn sync_blocks_when_block_announce_validator_says_it_is_new_best() {
 	sp_tracing::try_init_simple();
-	log::trace!(target: "sync", "Test");
 	let mut net = TestNet::with_fork_choice(ForkChoiceStrategy::Custom(false));
 	net.add_full_peer_with_config(Default::default());
 	net.add_full_peer_with_config(Default::default());
@@ -763,7 +762,6 @@ impl BlockAnnounceValidator for DeferredBlockAnnounceValidator {
 #[test]
 fn wait_until_deferred_block_announce_validation_is_ready() {
 	sp_tracing::try_init_simple();
-	log::trace!(target: "sync", "Test");
 	let mut net = TestNet::with_fork_choice(ForkChoiceStrategy::Custom(false));
 	net.add_full_peer_with_config(Default::default());
 	net.add_full_peer_with_config(FullPeerConfig {
@@ -779,3 +777,161 @@ fn wait_until_deferred_block_announce_validation_is_ready() {
 		net.block_until_idle();
 	}
 }
+
+/// When we don't inform the sync protocol about the best block, a node will not sync from us as
+/// the handshake does not contain our best block.
+#[test]
+fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() {
+	sp_tracing::try_init_simple();
+	let mut net = TestNet::new(1);
+
+	// Produce some blocks
+	let block_hash = net.peer(0).push_blocks_at_without_informing_sync(BlockId::Number(0), 3, true);
+
+	// Add a node and wait until they are connected
+	net.add_full_peer_with_config(Default::default());
+	net.block_until_connected();
+	net.block_until_idle();
+
+	// The peer should not have synced the block.
+	assert!(!net.peer(1).has_block(&block_hash));
+
+	// Make sync protocol aware of the best block
+	net.peer(0).network_service().new_best_block_imported(block_hash, 3);
+	net.block_until_idle();
+
+	// Connect another node that should now sync to the tip
+	net.add_full_peer_with_config(Default::default());
+	net.block_until_connected();
+
+	while !net.peer(2).has_block(&block_hash) {
+		net.block_until_idle();
+	}
+
+	// However peer 1 should still not have the block.
+	assert!(!net.peer(1).has_block(&block_hash));
+}
+
+/// Ensures that we, as a syncing node, sync to the tip even while we are connected to another peer
+/// that is currently also doing a major sync.
+#[test]
+fn sync_to_tip_when_we_sync_together_with_multiple_peers() {
+	sp_tracing::try_init_simple();
+
+	let mut net = TestNet::new(3);
+
+	let block_hash = net.peer(0).push_blocks_at_without_informing_sync(
+		BlockId::Number(0),
+		10_000,
+		false,
+	);
+
+	net.peer(1).push_blocks_at_without_informing_sync(
+		BlockId::Number(0),
+		5_000,
+		false,
+	);
+
+	net.block_until_connected();
+	net.block_until_idle();
+
+	assert!(!net.peer(2).has_block(&block_hash));
+
+	net.peer(0).network_service().new_best_block_imported(block_hash, 10_000);
+	while !net.peer(2).has_block(&block_hash) && !net.peer(1).has_block(&block_hash) {
+		net.block_until_idle();
+	}
+}
+
+/// Ensures that when we receive a block announcement with some data attached, we propagate
+/// this data when re-announcing the block.
+#[test] +fn block_announce_data_is_propagated() { + struct TestBlockAnnounceValidator; + + impl BlockAnnounceValidator for TestBlockAnnounceValidator { + fn validate( + &mut self, + _: &Header, + data: &[u8], + ) -> Pin>> + Send>> { + let correct = data.get(0) == Some(&137); + async move { + if correct { + Ok(Validation::Success { is_new_best: true }) + } else { + Ok(Validation::Failure { disconnect: false }) + } + }.boxed() + } + } + + sp_tracing::try_init_simple(); + let mut net = TestNet::new(1); + + net.add_full_peer_with_config(FullPeerConfig { + block_announce_validator: Some(Box::new(TestBlockAnnounceValidator)), + ..Default::default() + }); + + net.add_full_peer_with_config(FullPeerConfig { + block_announce_validator: Some(Box::new(TestBlockAnnounceValidator)), + connect_to_peers: Some(vec![1]), + ..Default::default() + }); + + // Wait until peer 1 is connected to both nodes. + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(1).num_peers() == 2 { + Poll::Ready(()) + } else { + Poll::Pending + } + })); + + let block_hash = net.peer(0).push_blocks_at_without_announcing(BlockId::Number(0), 1, true); + net.peer(0).announce_block(block_hash, Some(vec![137])); + + while !net.peer(1).has_block(&block_hash) || !net.peer(2).has_block(&block_hash) { + net.block_until_idle(); + } +} + +#[test] +fn continue_to_sync_after_some_block_announcement_verifications_failed() { + struct TestBlockAnnounceValidator; + + impl BlockAnnounceValidator for TestBlockAnnounceValidator { + fn validate( + &mut self, + header: &Header, + _: &[u8], + ) -> Pin>> + Send>> { + let number = *header.number(); + async move { + if number < 100 { + Err(Box::::from(String::from("error")) as Box<_>) + } else { + Ok(Validation::Success { is_new_best: false }) + } + }.boxed() + } + } + + sp_tracing::try_init_simple(); + let mut net = TestNet::new(1); + + net.add_full_peer_with_config(FullPeerConfig { + block_announce_validator: Some(Box::new(TestBlockAnnounceValidator)), + ..Default::default() + }); + + net.block_until_connected(); + net.block_until_idle(); + + let block_hash = net.peer(0).push_blocks(500, true); + + net.block_until_sync(); + assert!(net.peer(1).has_block(&block_hash)); +} diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 5686d33da9b23..5671affb6fb75 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate offchain workers" name = "sc-offchain" -version = "2.0.0" +version = "3.0.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -14,33 +14,35 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = "0.5" -sc-client-api = { version = "2.0.0", path = "../api" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } +sc-client-api = { version = "3.0.0", path = "../api" } +sp-api = { version = "3.0.0", path = "../../primitives/api" } fnv = "1.0.6" -futures = "0.3.4" +futures = "0.3.9" futures-timer = "3.0.1" log = "0.4.8" threadpool = "1.7" num_cpus = "1.10" -sp-offchain = { version = "2.0.0", path = "../../primitives/offchain" } -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -parking_lot = "0.10.0" -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-offchain = { version = "3.0.0", path = "../../primitives/offchain" } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +parking_lot = "0.11.1" +sp-core = { version = "3.0.0", 
path = "../../primitives/core" } rand = "0.7.2" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sc-network = { version = "0.8.0", path = "../network" } -sc-keystore = { version = "2.0.0", path = "../keystore" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "3.0.0", path = "../../primitives/utils" } +sc-network = { version = "0.9.0", path = "../network" } +sc-keystore = { version = "3.0.0", path = "../keystore" } [target.'cfg(not(target_os = "unknown"))'.dependencies] -hyper = "0.13.2" +hyper = "0.13.9" hyper-rustls = "0.21.0" [dev-dependencies] -sc-client-db = { version = "0.8.0", default-features = true, path = "../db/" } -sc-transaction-pool = { version = "2.0.0", path = "../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +sc-client-db = { version = "0.9.0", default-features = true, path = "../db" } +sc-block-builder = { version = "0.9.0", path = "../block-builder" } +sc-transaction-pool = { version = "3.0.0", path = "../transaction-pool" } +sp-transaction-pool = { version = "3.0.0", path = "../../primitives/transaction-pool" } +sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } tokio = "0.2" lazy_static = "1.4.0" diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index 6fb1da19bf058..64c5060fb0c6f 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . 
use std::{ str::FromStr, @@ -185,9 +187,9 @@ impl OffchainExt for Api { fn set_authorized_nodes(&mut self, nodes: Vec, authorized_only: bool) { let peer_ids: HashSet = nodes.into_iter() - .filter_map(|node| PeerId::from_bytes(node.0).ok()) + .filter_map(|node| PeerId::from_bytes(&node.0).ok()) .collect(); - + self.network_provider.set_authorized_peers(peer_ids); self.network_provider.set_authorized_only(authorized_only); } @@ -211,7 +213,7 @@ impl NetworkState { impl From for OpaqueNetworkState { fn from(state: NetworkState) -> OpaqueNetworkState { - let enc = Encode::encode(&state.peer_id.into_bytes()); + let enc = Encode::encode(&state.peer_id.to_bytes()); let peer_id = OpaquePeerId::new(enc); let external_addresses: Vec = state @@ -237,7 +239,7 @@ impl TryFrom for NetworkState { let inner_vec = state.peer_id.0; let bytes: Vec = Decode::decode(&mut &inner_vec[..]).map_err(|_| ())?; - let peer_id = PeerId::from_bytes(bytes).map_err(|_| ())?; + let peer_id = PeerId::from_bytes(&bytes).map_err(|_| ())?; let external_addresses: Result, Self::Error> = state.external_addresses .iter() diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index 1f542b7c11e19..dbe8e55b3646b 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! This module is composed of two structs: [`HttpApi`] and [`HttpWorker`]. Calling the [`http`] //! function returns a pair of [`HttpApi`] and [`HttpWorker`] that share some state. diff --git a/client/offchain/src/api/http_dummy.rs b/client/offchain/src/api/http_dummy.rs index 1c83325c93b20..ff9c2fb2aa029 100644 --- a/client/offchain/src/api/http_dummy.rs +++ b/client/offchain/src/api/http_dummy.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
-// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Contains the same API as the `http` module, except that everything returns an error. diff --git a/client/offchain/src/api/timestamp.rs b/client/offchain/src/api/timestamp.rs index 222d3273cb355..6ea0f000f8d19 100644 --- a/client/offchain/src/api/timestamp.rs +++ b/client/offchain/src/api/timestamp.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Helper methods dedicated to timestamps. @@ -50,7 +52,7 @@ pub fn timestamp_from_now(timestamp: Timestamp) -> Duration { /// If `None`, returns a never-ending `Future`. pub fn deadline_to_future( deadline: Option, -) -> futures::future::MaybeDone { +) -> futures::future::MaybeDone> { use futures::future::{self, Either}; future::maybe_done(match deadline.map(timestamp_from_now) { diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 885294449fb95..b82f89cb95d67 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
+// along with this program. If not, see . //! Substrate offchain workers. //! @@ -58,7 +60,7 @@ pub use sp_offchain::{OffchainWorkerApi, STORAGE_PREFIX}; pub trait NetworkProvider: NetworkStateInfo { /// Set the authorized peers. fn set_authorized_peers(&self, peers: HashSet); - + /// Set the authorized only flag. fn set_authorized_only(&self, reserved_only: bool); } @@ -236,9 +238,15 @@ mod tests { use super::*; use std::sync::Arc; use sc_network::{Multiaddr, PeerId}; - use substrate_test_runtime_client::{TestClient, runtime::Block}; + use substrate_test_runtime_client::{ + TestClient, runtime::Block, TestClientBuilderExt, + DefaultTestClientBuilderExt, ClientBlockImportExt, + }; use sc_transaction_pool::{BasicPool, FullChainApi}; use sp_transaction_pool::{TransactionPool, InPoolTransaction}; + use sp_consensus::BlockOrigin; + use sc_client_api::Backend as _; + use sc_block_builder::BlockBuilderProvider as _; struct TestNetwork(); @@ -287,6 +295,7 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = TestPool(BasicPool::new_full( Default::default(), + true.into(), None, spawner, client.clone(), @@ -305,4 +314,41 @@ mod tests { assert_eq!(pool.0.status().ready, 1); assert_eq!(pool.0.ready().next().unwrap().is_propagable(), false); } + + #[test] + fn offchain_index_set_and_clear_works() { + sp_tracing::try_init_simple(); + + let (client, backend) = + substrate_test_runtime_client::TestClientBuilder::new() + .enable_offchain_indexing_api() + .build_with_backend(); + let mut client = Arc::new(client); + let offchain_db = backend.offchain_storage().unwrap(); + + let key = &b"hello"[..]; + let value = &b"world"[..]; + let mut block_builder = client.new_block(Default::default()).unwrap(); + block_builder.push( + substrate_test_runtime_client::runtime::Extrinsic::OffchainIndexSet( + key.to_vec(), + value.to_vec(), + ), + ).unwrap(); + + let block = block_builder.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + + assert_eq!(value, &offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).unwrap()); + + let mut block_builder = client.new_block(Default::default()).unwrap(); + block_builder.push( + substrate_test_runtime_client::runtime::Extrinsic::OffchainIndexClear(key.to_vec()), + ).unwrap(); + + let block = block_builder.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + + assert!(offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).is_none()); + } } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 459f4a9302045..90f7820017e2f 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -3,7 +3,7 @@ description = "Connectivity manager based on reputation" homepage = "http://parity.io" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" name = "sc-peerset" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" repository = "https://github.com/paritytech/substrate/" @@ -15,9 +15,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.4" -libp2p = { version = "0.29.1", default-features = false } -sp-utils = { version = "2.0.0", path = "../../primitives/utils"} +futures = "0.3.9" +libp2p = { version = "0.34.0", default-features = false } +sp-utils = { version = "3.0.0", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" wasm-timer = "0.2" diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 575743afa079c..31162930efc67 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -1,6 
+1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,14 +18,27 @@ //! Peer Set Manager (PSM). Contains the strategy for choosing which nodes the network should be //! connected to. +//! +//! The PSM handles *sets* of nodes. A set of nodes is defined as the nodes that are believed to +//! support a certain capability, such as handling blocks and transactions of a specific chain, +//! or collating a certain parachain. +//! +//! For each node in each set, the peerset holds a flag specifying whether the node is +//! connected to us or not. +//! +//! This connected/disconnected status is specific to the node and set combination, and it is for +//! example possible for a node to be connected through a specific set but not another. +//! +//! In addition, for each, set, the peerset also holds a list of reserved nodes towards which it +//! will at all time try to maintain a connection with. mod peersstate; -use std::{collections::{HashSet, HashMap}, collections::VecDeque}; +use std::{collections::HashSet, collections::VecDeque}; use futures::prelude::*; use log::{debug, error, trace}; use serde_json::json; -use std::{pin::Pin, task::{Context, Poll}, time::Duration}; +use std::{collections::HashMap, pin::Pin, task::{Context, Poll}, time::Duration}; use wasm_timer::Instant; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; @@ -35,22 +48,46 @@ pub use libp2p::PeerId; const BANNED_THRESHOLD: i32 = 82 * (i32::min_value() / 100); /// Reputation change for a node when we get disconnected from it. const DISCONNECT_REPUTATION_CHANGE: i32 = -256; -/// Reserved peers group ID -const RESERVED_NODES: &'static str = "reserved"; /// Amount of time between the moment we disconnect from a node and the moment we remove it from /// the list. const FORGET_AFTER: Duration = Duration::from_secs(3600); #[derive(Debug)] enum Action { - AddReservedPeer(PeerId), - RemoveReservedPeer(PeerId), - SetReservedPeers(HashSet), - SetReservedOnly(bool), + AddReservedPeer(SetId, PeerId), + RemoveReservedPeer(SetId, PeerId), + SetReservedPeers(SetId, HashSet), + SetReservedOnly(SetId, bool), ReportPeer(PeerId, ReputationChange), - SetPriorityGroup(String, HashSet), - AddToPriorityGroup(String, PeerId), - RemoveFromPriorityGroup(String, PeerId), + AddToPeersSet(SetId, PeerId), + RemoveFromPeersSet(SetId, PeerId), +} + +/// Identifier of a set in the peerset. +/// +/// Can be constructed using the `From` trait implementation based on the index of the set +/// within [`PeersetConfig::sets`]. For example, the first element of [`PeersetConfig::sets`] is +/// later referred to with `SetId::from(0)`. It is intended that the code responsible for building +/// the [`PeersetConfig`] is also responsible for constructing the [`SetId`]s. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct SetId(usize); + +impl SetId { + pub const fn from(id: usize) -> Self { + SetId(id) + } +} + +impl From for SetId { + fn from(id: usize) -> Self { + SetId(id) + } +} + +impl From for usize { + fn from(id: SetId) -> Self { + id.0 + } } /// Description of a reputation adjustment for a node. @@ -87,26 +124,27 @@ impl PeersetHandle { /// Has no effect if the node was already a reserved peer. 
/// /// > **Note**: Keep in mind that the networking has to know an address for this node, - /// > otherwise it will not be able to connect to it. - pub fn add_reserved_peer(&self, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::AddReservedPeer(peer_id)); + /// > otherwise it will not be able to connect to it. + pub fn add_reserved_peer(&self, set_id: SetId, peer_id: PeerId) { + let _ = self.tx.unbounded_send(Action::AddReservedPeer(set_id, peer_id)); } /// Remove a previously-added reserved peer. /// /// Has no effect if the node was not a reserved peer. - pub fn remove_reserved_peer(&self, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::RemoveReservedPeer(peer_id)); + pub fn remove_reserved_peer(&self, set_id: SetId, peer_id: PeerId) { + let _ = self.tx.unbounded_send(Action::RemoveReservedPeer(set_id, peer_id)); } - /// Sets whether or not the peerset only has connections . - pub fn set_reserved_only(&self, reserved: bool) { - let _ = self.tx.unbounded_send(Action::SetReservedOnly(reserved)); + /// Sets whether or not the peerset only has connections with nodes marked as reserved for + /// the given set. + pub fn set_reserved_only(&self, set_id: SetId, reserved: bool) { + let _ = self.tx.unbounded_send(Action::SetReservedOnly(set_id, reserved)); } - + /// Set reserved peers to the new set. - pub fn set_reserved_peers(&self, peer_ids: HashSet) { - let _ = self.tx.unbounded_send(Action::SetReservedPeers(peer_ids)); + pub fn set_reserved_peers(&self, set_id: SetId, peer_ids: HashSet) { + let _ = self.tx.unbounded_send(Action::SetReservedPeers(set_id, peer_ids)); } /// Reports an adjustment to the reputation of the given peer. @@ -114,19 +152,14 @@ impl PeersetHandle { let _ = self.tx.unbounded_send(Action::ReportPeer(peer_id, score_diff)); } - /// Modify a priority group. - pub fn set_priority_group(&self, group_id: String, peers: HashSet) { - let _ = self.tx.unbounded_send(Action::SetPriorityGroup(group_id, peers)); - } - - /// Add a peer to a priority group. - pub fn add_to_priority_group(&self, group_id: String, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::AddToPriorityGroup(group_id, peer_id)); + /// Add a peer to a set. + pub fn add_to_peers_set(&self, set_id: SetId, peer_id: PeerId) { + let _ = self.tx.unbounded_send(Action::AddToPeersSet(set_id, peer_id)); } - /// Remove a peer from a priority group. - pub fn remove_from_priority_group(&self, group_id: String, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::RemoveFromPriorityGroup(group_id, peer_id)); + /// Remove a peer from a set. + pub fn remove_from_peers_set(&self, set_id: SetId, peer_id: PeerId) { + let _ = self.tx.unbounded_send(Action::RemoveFromPeersSet(set_id, peer_id)); } } @@ -135,10 +168,18 @@ impl PeersetHandle { pub enum Message { /// Request to open a connection to the given peer. From the point of view of the PSM, we are /// immediately connected. - Connect(PeerId), + Connect { + set_id: SetId, + /// Peer to connect to. + peer_id: PeerId, + }, /// Drop the connection to the given peer, or cancel the connection attempt after a `Connect`. - Drop(PeerId), + Drop { + set_id: SetId, + /// Peer to disconnect from. + peer_id: PeerId, + }, /// Equivalent to `Connect` for the peer corresponding to this incoming index. Accept(IncomingIndex), @@ -160,26 +201,33 @@ impl From for IncomingIndex { /// Configuration to pass when creating the peer set manager. #[derive(Debug)] pub struct PeersetConfig { + /// List of sets of nodes the peerset manages. 
+ pub sets: Vec, +} + +/// Configuration for a single set of nodes. +#[derive(Debug)] +pub struct SetConfig { /// Maximum number of ingoing links to peers. pub in_peers: u32, /// Maximum number of outgoing links to peers. pub out_peers: u32, - /// List of bootstrap nodes to initialize the peer with. + /// List of bootstrap nodes to initialize the set with. /// /// > **Note**: Keep in mind that the networking has to know an address for these nodes, - /// > otherwise it will not be able to connect to them. + /// > otherwise it will not be able to connect to them. pub bootnodes: Vec, - /// If true, we only accept nodes in [`PeersetConfig::priority_groups`]. - pub reserved_only: bool, - /// Lists of nodes we should always be connected to. /// /// > **Note**: Keep in mind that the networking has to know an address for these nodes, /// > otherwise it will not be able to connect to them. - pub priority_groups: Vec<(String, HashSet)>, + pub reserved_nodes: HashSet, + + /// If true, we only accept nodes in [`SetConfig::reserved_nodes`]. + pub reserved_only: bool, } /// Side of the peer set manager owned by the network. In other words, the "receiving" side. @@ -190,11 +238,10 @@ pub struct PeersetConfig { pub struct Peerset { /// Underlying data structure for the nodes's states. data: peersstate::PeersState, - /// If true, we only accept reserved nodes. - reserved_only: bool, - /// Lists of nodes that don't occupy slots and that we should try to always be connected to. - /// Is kept in sync with the list of reserved nodes in [`Peerset::data`]. - priority_groups: HashMap>, + /// For each set, lists of nodes that don't occupy slots and that we should try to always be + /// connected to, and whether only reserved nodes are accepted. Is kept in sync with the list + /// of non-slot-occupying nodes in [`Peerset::data`]. + reserved_nodes: Vec<(HashSet, bool)>, /// Receiver for messages from the `PeersetHandle` and from `tx`. rx: TracingUnboundedReceiver, /// Sending side of `rx`. 
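With priority groups replaced by indexed sets, a peerset is now configured with one `SetConfig` per capability and addressed through `SetId`. The sketch below builds a single-set configuration, mirroring the updated tests further down in this diff; the `single_set_peerset` helper and the concrete peer counts are illustrative only.

use std::collections::HashSet;
use libp2p::PeerId;
use sc_peerset::{Peerset, PeersetConfig, PeersetHandle, SetConfig, SetId};

fn single_set_peerset(bootnode: PeerId, reserved: PeerId) -> (Peerset, PeersetHandle) {
    // Sets are addressed by their index in `sets`; the first (and here only)
    // entry is later referred to as `SetId::from(0)`.
    let mut reserved_nodes = HashSet::new();
    reserved_nodes.insert(reserved);

    let config = PeersetConfig {
        sets: vec![SetConfig {
            in_peers: 25,
            out_peers: 25,
            bootnodes: vec![bootnode],
            reserved_nodes,
            reserved_only: false,
        }],
    };

    Peerset::from_config(config)
}

Reserved peers are then managed per set, for example `handle.add_reserved_peer(SetId::from(0), peer_id)`.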
@@ -216,28 +263,36 @@ impl Peerset { tx: tx.clone(), }; - let now = Instant::now(); - - let mut peerset = Peerset { - data: peersstate::PeersState::new(config.in_peers, config.out_peers), - tx, - rx, - reserved_only: config.reserved_only, - priority_groups: config.priority_groups.clone().into_iter().collect(), - message_queue: VecDeque::new(), - created: now, - latest_time_update: now, + let mut peerset = { + let now = Instant::now(); + + Peerset { + data: peersstate::PeersState::new(config.sets.iter().map(|set| peersstate::SetConfig { + in_peers: set.in_peers, + out_peers: set.out_peers, + })), + tx, + rx, + reserved_nodes: config.sets.iter().map(|set| { + (set.reserved_nodes.clone(), set.reserved_only) + }).collect(), + message_queue: VecDeque::new(), + created: now, + latest_time_update: now, + } }; - for node in config.priority_groups.into_iter().flat_map(|(_, l)| l) { - peerset.data.add_no_slot_node(node); - } + for (set, set_config) in config.sets.into_iter().enumerate() { + for node in set_config.reserved_nodes { + peerset.data.add_no_slot_node(set, node); + } - for peer_id in config.bootnodes { - if let peersstate::Peer::Unknown(entry) = peerset.data.peer(&peer_id) { - entry.discover(); - } else { - debug!(target: "peerset", "Duplicate bootnode in config: {:?}", peer_id); + for peer_id in set_config.bootnodes { + if let peersstate::Peer::Unknown(entry) = peerset.data.peer(set, &peer_id) { + entry.discover(); + } else { + debug!(target: "peerset", "Duplicate bootnode in config: {:?}", peer_id); + } } } @@ -245,96 +300,109 @@ impl Peerset { (peerset, handle) } - fn on_add_reserved_peer(&mut self, peer_id: PeerId) { - self.on_add_to_priority_group(RESERVED_NODES, peer_id); - } + fn on_add_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { + let newly_inserted = self.reserved_nodes[set_id.0].0.insert(peer_id.clone()); + if !newly_inserted { + return; + } - fn on_remove_reserved_peer(&mut self, peer_id: PeerId) { - self.on_remove_from_priority_group(RESERVED_NODES, peer_id); - } - - fn on_set_reserved_peers(&mut self, peer_ids: HashSet) { - self.on_set_priority_group(RESERVED_NODES, peer_ids); + self.data.add_no_slot_node(set_id.0, peer_id); + self.alloc_slots(); } - fn on_set_reserved_only(&mut self, reserved_only: bool) { - self.reserved_only = reserved_only; + fn on_remove_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { + if !self.reserved_nodes[set_id.0].0.remove(&peer_id) { + return; + } - if self.reserved_only { - // Disconnect all the nodes that aren't reserved. - for peer_id in self.data.connected_peers().cloned().collect::>().into_iter() { - if self.priority_groups.get(RESERVED_NODES).map_or(false, |g| g.contains(&peer_id)) { - continue; - } + self.data.remove_no_slot_node(set_id.0, &peer_id); - let peer = self.data.peer(&peer_id).into_connected() - .expect("We are enumerating connected peers, therefore the peer is connected; qed"); - peer.disconnect(); - self.message_queue.push_back(Message::Drop(peer_id)); - } + // Nothing more to do if not in reserved-only mode. + if !self.reserved_nodes[set_id.0].1 { + return; + } - } else { - self.alloc_slots(); + // If, however, the peerset is in reserved-only mode, then the removed node needs to be + // disconnected. 
+ if let peersstate::Peer::Connected(peer) = self.data.peer(set_id.0, &peer_id) { + peer.disconnect(); + self.message_queue.push_back(Message::Drop { + set_id, + peer_id, + }); } } - fn on_set_priority_group(&mut self, group_id: &str, peers: HashSet) { + fn on_set_reserved_peers(&mut self, set_id: SetId, peer_ids: HashSet) { // Determine the difference between the current group and the new list. let (to_insert, to_remove) = { - let current_group = self.priority_groups.entry(group_id.to_owned()).or_default(); - let to_insert = peers.difference(current_group) + let to_insert = peer_ids.difference(&self.reserved_nodes[set_id.0].0) .cloned().collect::>(); - let to_remove = current_group.difference(&peers) + let to_remove = self.reserved_nodes[set_id.0].0.difference(&peer_ids) .cloned().collect::>(); (to_insert, to_remove) }; - // Enumerate elements in `peers` not in `current_group`. - for peer_id in &to_insert { - // We don't call `on_add_to_priority_group` here in order to avoid calling - // `alloc_slots` all the time. - self.priority_groups.entry(group_id.to_owned()).or_default().insert(peer_id.clone()); - self.data.add_no_slot_node(peer_id.clone()); + for node in to_insert { + self.on_add_reserved_peer(set_id, node); } - // Enumerate elements in `current_group` not in `peers`. - for peer in to_remove { - self.on_remove_from_priority_group(group_id, peer); + for node in to_remove { + self.on_remove_reserved_peer(set_id, node); } + } + + fn on_set_reserved_only(&mut self, set_id: SetId, reserved_only: bool) { + self.reserved_nodes[set_id.0].1 = reserved_only; + + if reserved_only { + // Disconnect all the nodes that aren't reserved. + for peer_id in self.data.connected_peers(set_id.0).cloned().collect::>().into_iter() { + if self.reserved_nodes[set_id.0].0.contains(&peer_id) { + continue; + } + + let peer = self.data.peer(set_id.0, &peer_id).into_connected() + .expect("We are enumerating connected peers, therefore the peer is connected; qed"); + peer.disconnect(); + self.message_queue.push_back(Message::Drop { + set_id, + peer_id + }); + } - if !to_insert.is_empty() { + } else { self.alloc_slots(); } } - fn on_add_to_priority_group(&mut self, group_id: &str, peer_id: PeerId) { - self.priority_groups.entry(group_id.to_owned()).or_default().insert(peer_id.clone()); - self.data.add_no_slot_node(peer_id); - self.alloc_slots(); + /// Adds a node to the given set. The peerset will, if possible and not already the case, + /// try to connect to it. + /// + /// > **Note**: This has the same effect as [`PeersetHandle::add_to_peers_set`]. + pub fn add_to_peers_set(&mut self, set_id: SetId, peer_id: PeerId) { + if let peersstate::Peer::Unknown(entry) = self.data.peer(set_id.0, &peer_id) { + entry.discover(); + self.alloc_slots(); + } } - fn on_remove_from_priority_group(&mut self, group_id: &str, peer_id: PeerId) { - if let Some(priority_group) = self.priority_groups.get_mut(group_id) { - if !priority_group.remove(&peer_id) { - // `PeerId` wasn't in the group in the first place. - return; - } - } else { - // Group doesn't exist, so the `PeerId` can't be in it. + fn on_remove_from_peers_set(&mut self, set_id: SetId, peer_id: PeerId) { + // Don't do anything if node is reserved. + if self.reserved_nodes[set_id.0].0.contains(&peer_id) { return; } - // If that `PeerId` isn't in any other group, then it is no longer no-slot-occupying. - if !self.priority_groups.values().any(|l| l.contains(&peer_id)) { - self.data.remove_no_slot_node(&peer_id); - } - - // Disconnect the peer if necessary. 
- if group_id != RESERVED_NODES && self.reserved_only { - if let peersstate::Peer::Connected(peer) = self.data.peer(&peer_id) { - peer.disconnect(); - self.message_queue.push_back(Message::Drop(peer_id)); + match self.data.peer(set_id.0, &peer_id) { + peersstate::Peer::Connected(peer) => { + self.message_queue.push_back(Message::Drop { + set_id, + peer_id: peer.peer_id().clone(), + }); + peer.disconnect().forget_peer(); } + peersstate::Peer::NotConnected(peer) => { peer.forget_peer(); } + peersstate::Peer::Unknown(_) => {} } } @@ -342,23 +410,29 @@ impl Peerset { // We want reputations to be up-to-date before adjusting them. self.update_time(); - match self.data.peer(&peer_id) { - peersstate::Peer::Connected(mut peer) => { - peer.add_reputation(change.value); - if peer.reputation() < BANNED_THRESHOLD { - debug!(target: "peerset", "Report {}: {:+} to {}. Reason: {}, Disconnecting", - peer_id, change.value, peer.reputation(), change.reason - ); - peer.disconnect(); - self.message_queue.push_back(Message::Drop(peer_id)); - } else { - trace!(target: "peerset", "Report {}: {:+} to {}. Reason: {}", - peer_id, change.value, peer.reputation(), change.reason - ); - } - }, - peersstate::Peer::NotConnected(mut peer) => peer.add_reputation(change.value), - peersstate::Peer::Unknown(peer) => peer.discover().add_reputation(change.value), + let mut reputation = self.data.peer_reputation(peer_id.clone()); + reputation.add_reputation(change.value); + if reputation.reputation() >= BANNED_THRESHOLD { + trace!(target: "peerset", "Report {}: {:+} to {}. Reason: {}", + peer_id, change.value, reputation.reputation(), change.reason + ); + return; + } + + debug!(target: "peerset", "Report {}: {:+} to {}. Reason: {}, Disconnecting", + peer_id, change.value, reputation.reputation(), change.reason + ); + + drop(reputation); + + for set_index in 0..self.data.num_sets() { + if let peersstate::Peer::Connected(peer) = self.data.peer(set_index, &peer_id) { + let peer = peer.disconnect(); + self.message_queue.push_back(Message::Drop { + set_id: SetId(set_index), + peer_id: peer.into_peer_id(), + }); + } } } @@ -393,27 +467,35 @@ impl Peerset { } reput.saturating_sub(diff) } - match self.data.peer(&peer_id) { - peersstate::Peer::Connected(mut peer) => { - let before = peer.reputation(); - let after = reput_tick(before); - trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); - peer.set_reputation(after) - } - peersstate::Peer::NotConnected(mut peer) => { - if peer.reputation() == 0 && - peer.last_connected_or_discovered() + FORGET_AFTER < now - { - peer.forget_peer(); - } else { - let before = peer.reputation(); - let after = reput_tick(before); - trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); - peer.set_reputation(after) + + let mut peer_reputation = self.data.peer_reputation(peer_id.clone()); + + let before = peer_reputation.reputation(); + let after = reput_tick(before); + trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); + peer_reputation.set_reputation(after); + + if after != 0 { + continue; + } + + drop(peer_reputation); + + // If the peer reaches a reputation of 0, and there is no connection to it, + // forget it. 
+ for set_index in 0..self.data.num_sets() { + match self.data.peer(set_index, &peer_id) { + peersstate::Peer::Connected(_) => {} + peersstate::Peer::NotConnected(peer) => { + if peer.last_connected_or_discovered() + FORGET_AFTER < now { + peer.forget_peer(); + } + } + peersstate::Peer::Unknown(_) => { + // Happens if this peer does not belong to this set. } } - peersstate::Peer::Unknown(_) => unreachable!("We iterate over known peers; qed") - }; + } } } } @@ -423,95 +505,54 @@ impl Peerset { self.update_time(); // Try to connect to all the reserved nodes that we are not connected to. - loop { - let next = { - let data = &mut self.data; - self.priority_groups - .get(RESERVED_NODES) - .into_iter() - .flatten() - .filter(move |n| { - data.peer(n).into_connected().is_none() - }) - .next() - .cloned() - }; - - let next = match next { - Some(n) => n, - None => break, - }; + for set_index in 0..self.data.num_sets() { + for reserved_node in &self.reserved_nodes[set_index].0 { + let entry = match self.data.peer(set_index, reserved_node) { + peersstate::Peer::Unknown(n) => n.discover(), + peersstate::Peer::NotConnected(n) => n, + peersstate::Peer::Connected(_) => continue, + }; - let next = match self.data.peer(&next) { - peersstate::Peer::Unknown(n) => n.discover(), - peersstate::Peer::NotConnected(n) => n, - peersstate::Peer::Connected(_) => { - debug_assert!(false, "State inconsistency: not connected state"); - break; + match entry.try_outgoing() { + Ok(conn) => self.message_queue.push_back(Message::Connect { + set_id: SetId(set_index), + peer_id: conn.into_peer_id() + }), + Err(_) => { + // An error is returned only if no slot is available. Reserved nodes are + // marked in the state machine with a flag saying "doesn't occupy a slot", + // and as such this should never happen. + debug_assert!(false); + log::error!( + target: "peerset", + "Not enough slots to connect to reserved node" + ); + } } - }; - - match next.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect(conn.into_peer_id())), - Err(_) => break, // No more slots available. } } - // Nothing more to do if we're in reserved mode. - if self.reserved_only { - return; - } - - // Try to connect to all the nodes in priority groups and that we are not connected to. - loop { - let next = { - let data = &mut self.data; - self.priority_groups - .values() - .flatten() - .filter(move |n| { - data.peer(n).into_connected().is_none() - }) - .next() - .cloned() - }; - - let next = match next { - Some(n) => n, - None => break, - }; - - let next = match self.data.peer(&next) { - peersstate::Peer::Unknown(n) => n.discover(), - peersstate::Peer::NotConnected(n) => n, - peersstate::Peer::Connected(_) => { - debug_assert!(false, "State inconsistency: not connected state"); - break; - } - }; - - match next.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect(conn.into_peer_id())), - Err(_) => break, // No more slots available. + // Now, we try to connect to other nodes. + for set_index in 0..self.data.num_sets() { + // Nothing more to do if we're in reserved mode. + if self.reserved_nodes[set_index].1 { + continue; } - } - // Now, we try to connect to non-priority nodes. - loop { // Try to grab the next node to attempt to connect to. - let next = match self.data.highest_not_connected_peer() { - Some(p) => p, - None => break, // No known node to add. - }; - - // Don't connect to nodes with an abysmal reputation. 
- if next.reputation() < BANNED_THRESHOLD { - break; - } + while let Some(next) = self.data.highest_not_connected_peer(set_index) { + // Don't connect to nodes with an abysmal reputation. + if next.reputation() < BANNED_THRESHOLD { + break; + } - match next.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect(conn.into_peer_id())), - Err(_) => break, // No more slots available. + match next.try_outgoing() { + Ok(conn) => self.message_queue.push_back(Message::Connect { + set_id: SetId(set_index), + peer_id: conn.into_peer_id() + }), + Err(_) => break, // No more slots available. + } } } } @@ -526,18 +567,19 @@ impl Peerset { // Implementation note: because of concurrency issues, it is possible that we push a `Connect` // message to the output channel with a `PeerId`, and that `incoming` gets called with the same // `PeerId` before that message has been read by the user. In this situation we must not answer. - pub fn incoming(&mut self, peer_id: PeerId, index: IncomingIndex) { + pub fn incoming(&mut self, set_id: SetId, peer_id: PeerId, index: IncomingIndex) { trace!(target: "peerset", "Incoming {:?}", peer_id); + self.update_time(); - if self.reserved_only { - if !self.priority_groups.get(RESERVED_NODES).map_or(false, |n| n.contains(&peer_id)) { + if self.reserved_nodes[set_id.0].1 { + if !self.reserved_nodes[set_id.0].0.contains(&peer_id) { self.message_queue.push_back(Message::Reject(index)); return; } } - let not_connected = match self.data.peer(&peer_id) { + let not_connected = match self.data.peer(set_id.0, &peer_id) { // If we're already connected, don't answer, as the docs mention. peersstate::Peer::Connected(_) => return, peersstate::Peer::NotConnected(mut entry) => { @@ -562,42 +604,27 @@ impl Peerset { /// /// Must only be called after the PSM has either generated a `Connect` message with this /// `PeerId`, or accepted an incoming connection with this `PeerId`. - pub fn dropped(&mut self, peer_id: PeerId) { - trace!(target: "peerset", "Dropping {:?}", peer_id); - + pub fn dropped(&mut self, set_id: SetId, peer_id: PeerId, reason: DropReason) { // We want reputations to be up-to-date before adjusting them. self.update_time(); - match self.data.peer(&peer_id) { + match self.data.peer(set_id.0, &peer_id) { peersstate::Peer::Connected(mut entry) => { // Decrease the node's reputation so that we don't try it again and again and again. entry.add_reputation(DISCONNECT_REPUTATION_CHANGE); + trace!(target: "peerset", "Dropping {}: {:+} to {}", + peer_id, DISCONNECT_REPUTATION_CHANGE, entry.reputation()); entry.disconnect(); } peersstate::Peer::NotConnected(_) | peersstate::Peer::Unknown(_) => error!(target: "peerset", "Received dropped() for non-connected node"), } - self.alloc_slots(); - } - - /// Adds discovered peer ids to the PSM. - /// - /// > **Note**: There is no equivalent "expired" message, meaning that it is the responsibility - /// > of the PSM to remove `PeerId`s that fail to dial too often. - pub fn discovered>(&mut self, peer_ids: I) { - let mut discovered_any = false; - - for peer_id in peer_ids { - if let peersstate::Peer::Unknown(entry) = self.data.peer(&peer_id) { - entry.discover(); - discovered_any = true; - } + if let DropReason::Refused = reason { + self.on_remove_from_peers_set(set_id, peer_id); } - if discovered_any { - self.alloc_slots(); - } + self.alloc_slots(); } /// Reports an adjustment to the reputation of the given peer. 
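`incoming` and `dropped` now identify the set an event concerns, and `dropped` additionally carries a `DropReason`. The sketch below shows how a network backend might drive one incoming connection; the `handle_incoming` helper and the immediate `DropReason::Refused` report are purely illustrative.

use futures::prelude::*;
use libp2p::PeerId;
use sc_peerset::{DropReason, IncomingIndex, Message, Peerset, SetId};

async fn handle_incoming(peerset: &mut Peerset, peer_id: PeerId) {
    // Tell the peerset about an incoming substream on the first set.
    peerset.incoming(SetId::from(0), peer_id.clone(), IncomingIndex(0));

    // The peerset answers with `Accept` or `Reject`; banned peers (reputation
    // below `BANNED_THRESHOLD`) and full sets are rejected.
    match peerset.next().await {
        Some(Message::Accept(_)) => {
            // Simulate the remote refusing the protocol right away: reporting
            // `Refused` also removes the node from the set.
            peerset.dropped(SetId::from(0), peer_id, DropReason::Refused);
        }
        Some(Message::Reject(_)) => { /* just close the substream */ }
        _ => {}
    }
}

For outgoing connections the flow is inverted: the peerset emits `Message::Connect { set_id, peer_id }` and the backend reports failures back through `dropped`.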
@@ -613,23 +640,29 @@ impl Peerset { self.update_time(); json!({ - "nodes": self.data.peers().cloned().collect::>().into_iter().map(|peer_id| { - let state = match self.data.peer(&peer_id) { - peersstate::Peer::Connected(entry) => json!({ - "connected": true, - "reputation": entry.reputation() - }), - peersstate::Peer::NotConnected(entry) => json!({ - "connected": false, - "reputation": entry.reputation() - }), - peersstate::Peer::Unknown(_) => - unreachable!("We iterate over the known peers; QED") - }; - - (peer_id.to_base58(), state) - }).collect::>(), - "reserved_only": self.reserved_only, + "sets": (0..self.data.num_sets()).map(|set_index| { + json!({ + "nodes": self.data.peers().cloned().collect::>().into_iter().filter_map(|peer_id| { + let state = match self.data.peer(set_index, &peer_id) { + peersstate::Peer::Connected(entry) => json!({ + "connected": true, + "reputation": entry.reputation() + }), + peersstate::Peer::NotConnected(entry) => json!({ + "connected": false, + "reputation": entry.reputation() + }), + peersstate::Peer::Unknown(_) => return None, + }; + + Some((peer_id.to_base58(), state)) + }).collect::>(), + "reserved_nodes": self.reserved_nodes[set_index].0.iter().map(|peer_id| { + peer_id.to_base58() + }).collect::>(), + "reserved_only": self.reserved_nodes[set_index].1, + }) + }).collect::>(), "message_queue": self.message_queue.len(), }) } @@ -638,11 +671,6 @@ impl Peerset { pub fn num_discovered_peers(&self) -> usize { self.data.peers().len() } - - /// Returns the content of a priority group. - pub fn priority_group(&self, group_id: &str) -> Option> { - self.priority_groups.get(group_id).map(|l| l.iter()) - } } impl Stream for Peerset { @@ -661,32 +689,41 @@ impl Stream for Peerset { }; match action { - Action::AddReservedPeer(peer_id) => - self.on_add_reserved_peer(peer_id), - Action::RemoveReservedPeer(peer_id) => - self.on_remove_reserved_peer(peer_id), - Action::SetReservedPeers(peer_ids) => - self.on_set_reserved_peers(peer_ids), - Action::SetReservedOnly(reserved) => - self.on_set_reserved_only(reserved), + Action::AddReservedPeer(set_id, peer_id) => + self.on_add_reserved_peer(set_id, peer_id), + Action::RemoveReservedPeer(set_id, peer_id) => + self.on_remove_reserved_peer(set_id, peer_id), + Action::SetReservedPeers(set_id, peer_ids) => + self.on_set_reserved_peers(set_id, peer_ids), + Action::SetReservedOnly(set_id, reserved) => + self.on_set_reserved_only(set_id, reserved), Action::ReportPeer(peer_id, score_diff) => self.on_report_peer(peer_id, score_diff), - Action::SetPriorityGroup(group_id, peers) => - self.on_set_priority_group(&group_id, peers), - Action::AddToPriorityGroup(group_id, peer_id) => - self.on_add_to_priority_group(&group_id, peer_id), - Action::RemoveFromPriorityGroup(group_id, peer_id) => - self.on_remove_from_priority_group(&group_id, peer_id), + Action::AddToPeersSet(sets_name, peer_id) => + self.add_to_peers_set(sets_name, peer_id), + Action::RemoveFromPeersSet(sets_name, peer_id) => + self.on_remove_from_peers_set(sets_name, peer_id), } } } } +/// Reason for calling [`Peerset::dropped`]. +pub enum DropReason { + /// Substream or connection has been closed for an unknown reason. + Unknown, + /// Substream or connection has been explicitly refused by the target. In other words, the + /// peer doesn't actually belong to this set. + /// + /// This has the side effect of calling [`PeersetHandle::remove_from_peers_set`]. 
+ Refused, +} + #[cfg(test)] mod tests { use libp2p::PeerId; use futures::prelude::*; - use super::{PeersetConfig, Peerset, Message, IncomingIndex, ReputationChange, BANNED_THRESHOLD}; + use super::{PeersetConfig, Peerset, Message, IncomingIndex, ReputationChange, SetConfig, SetId, BANNED_THRESHOLD}; use std::{pin::Pin, task::Poll, thread, time::Duration}; fn assert_messages(mut peerset: Peerset, messages: Vec) -> Peerset { @@ -710,20 +747,22 @@ mod tests { let reserved_peer = PeerId::random(); let reserved_peer2 = PeerId::random(); let config = PeersetConfig { - in_peers: 0, - out_peers: 2, - bootnodes: vec![bootnode], - reserved_only: true, - priority_groups: Vec::new(), + sets: vec![SetConfig { + in_peers: 0, + out_peers: 2, + bootnodes: vec![bootnode], + reserved_nodes: Default::default(), + reserved_only: true, + }], }; let (peerset, handle) = Peerset::from_config(config); - handle.add_reserved_peer(reserved_peer.clone()); - handle.add_reserved_peer(reserved_peer2.clone()); + handle.add_reserved_peer(SetId::from(0), reserved_peer.clone()); + handle.add_reserved_peer(SetId::from(0), reserved_peer2.clone()); assert_messages(peerset, vec![ - Message::Connect(reserved_peer), - Message::Connect(reserved_peer2) + Message::Connect { set_id: SetId::from(0), peer_id: reserved_peer }, + Message::Connect { set_id: SetId::from(0), peer_id: reserved_peer2 } ]); } @@ -738,21 +777,23 @@ mod tests { let ii3 = IncomingIndex(3); let ii4 = IncomingIndex(3); let config = PeersetConfig { - in_peers: 2, - out_peers: 1, - bootnodes: vec![bootnode.clone()], - reserved_only: false, - priority_groups: Vec::new(), + sets: vec![SetConfig { + in_peers: 2, + out_peers: 1, + bootnodes: vec![bootnode.clone()], + reserved_nodes: Default::default(), + reserved_only: false, + }], }; let (mut peerset, _handle) = Peerset::from_config(config); - peerset.incoming(incoming.clone(), ii); - peerset.incoming(incoming.clone(), ii4); - peerset.incoming(incoming2.clone(), ii2); - peerset.incoming(incoming3.clone(), ii3); + peerset.incoming(SetId::from(0), incoming.clone(), ii); + peerset.incoming(SetId::from(0), incoming.clone(), ii4); + peerset.incoming(SetId::from(0), incoming2.clone(), ii2); + peerset.incoming(SetId::from(0), incoming3.clone(), ii3); assert_messages(peerset, vec![ - Message::Connect(bootnode.clone()), + Message::Connect { set_id: SetId::from(0), peer_id: bootnode.clone() }, Message::Accept(ii), Message::Accept(ii2), Message::Reject(ii3), @@ -764,15 +805,17 @@ mod tests { let incoming = PeerId::random(); let ii = IncomingIndex(1); let config = PeersetConfig { - in_peers: 50, - out_peers: 50, - bootnodes: vec![], - reserved_only: true, - priority_groups: vec![], + sets: vec![SetConfig { + in_peers: 50, + out_peers: 50, + bootnodes: vec![], + reserved_nodes: Default::default(), + reserved_only: true, + }], }; let (mut peerset, _) = Peerset::from_config(config); - peerset.incoming(incoming.clone(), ii); + peerset.incoming(SetId::from(0), incoming.clone(), ii); assert_messages(peerset, vec![ Message::Reject(ii), @@ -785,32 +828,36 @@ mod tests { let discovered = PeerId::random(); let discovered2 = PeerId::random(); let config = PeersetConfig { - in_peers: 0, - out_peers: 2, - bootnodes: vec![bootnode.clone()], - reserved_only: false, - priority_groups: vec![], + sets: vec![SetConfig { + in_peers: 0, + out_peers: 2, + bootnodes: vec![bootnode.clone()], + reserved_nodes: Default::default(), + reserved_only: false, + }], }; let (mut peerset, _handle) = Peerset::from_config(config); - 
peerset.discovered(Some(discovered.clone())); - peerset.discovered(Some(discovered.clone())); - peerset.discovered(Some(discovered2)); + peerset.add_to_peers_set(SetId::from(0), discovered.clone()); + peerset.add_to_peers_set(SetId::from(0), discovered.clone()); + peerset.add_to_peers_set(SetId::from(0), discovered2); assert_messages(peerset, vec![ - Message::Connect(bootnode), - Message::Connect(discovered), + Message::Connect { set_id: SetId::from(0), peer_id: bootnode }, + Message::Connect { set_id: SetId::from(0), peer_id: discovered }, ]); } #[test] fn test_peerset_banned() { let (mut peerset, handle) = Peerset::from_config(PeersetConfig { - in_peers: 25, - out_peers: 25, - bootnodes: vec![], - reserved_only: false, - priority_groups: vec![], + sets: vec![SetConfig { + in_peers: 25, + out_peers: 25, + bootnodes: vec![], + reserved_nodes: Default::default(), + reserved_only: false, + }], }); // We ban a node by setting its reputation under the threshold. @@ -822,7 +869,7 @@ mod tests { assert_eq!(Stream::poll_next(Pin::new(&mut peerset), cx), Poll::Pending); // Check that an incoming connection from that node gets refused. - peerset.incoming(peer_id.clone(), IncomingIndex(1)); + peerset.incoming(SetId::from(0), peer_id.clone(), IncomingIndex(1)); if let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { assert_eq!(msg.unwrap(), Message::Reject(IncomingIndex(1))); } else { @@ -833,7 +880,7 @@ mod tests { thread::sleep(Duration::from_millis(1500)); // Try again. This time the node should be accepted. - peerset.incoming(peer_id.clone(), IncomingIndex(2)); + peerset.incoming(SetId::from(0), peer_id.clone(), IncomingIndex(2)); while let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { assert_eq!(msg.unwrap(), Message::Accept(IncomingIndex(2))); } diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index 59879f629e31e..c79dac5e10a7b 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ -1,25 +1,28 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Reputation and slots allocation system behind the peerset. //! //! The [`PeersState`] state machine is responsible for managing the reputation and allocating -//! slots. It holds a list of nodes, each associated with a reputation value and whether we are -//! connected or not to this node. Thanks to this list, it knows how many slots are occupied. It -//! also holds a list of nodes which don't occupy slots. +//! slots. 
It holds a list of nodes, each associated with a reputation value, a list of sets the +//! node belongs to, and for each set whether we are connected or not to this node. Thanks to this +//! list, it knows how many slots are occupied. It also holds a list of nodes which don't occupy +//! slots. //! //! > Note: This module is purely dedicated to managing slots and reputations. Features such as //! > for example connecting to some nodes in priority should be added outside of this @@ -27,7 +30,10 @@ use libp2p::PeerId; use log::error; -use std::{borrow::Cow, collections::{HashSet, HashMap}}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet, hash_map::{Entry, OccupiedEntry}}, +}; use wasm_timer::Instant; /// State storage behind the peerset. @@ -42,20 +48,37 @@ pub struct PeersState { /// List of nodes that we know about. /// /// > **Note**: This list should really be ordered by decreasing reputation, so that we can - /// easily select the best node to connect to. As a first draft, however, we don't - /// sort, to make the logic easier. + /// easily select the best node to connect to. As a first draft, however, we don't + /// sort, to make the logic easier. nodes: HashMap, - /// Number of slot-occupying nodes for which the `ConnectionState` is `In`. + /// Configuration of each set. The size of this `Vec` is never modified. + sets: Vec, +} + +/// Configuration of a single set. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct SetConfig { + /// Maximum allowed number of slot-occupying nodes for ingoing connections. + pub in_peers: u32, + + /// Maximum allowed number of slot-occupying nodes for outgoing connections. + pub out_peers: u32, +} + +/// State of a single set. +#[derive(Debug, Clone, PartialEq, Eq)] +struct SetInfo { + /// Number of slot-occupying nodes for which the `MembershipState` is `In`. num_in: u32, - /// Number of slot-occupying nodes for which the `ConnectionState` is `In`. + /// Number of slot-occupying nodes for which the `MembershipState` is `In`. num_out: u32, - /// Maximum allowed number of slot-occupying nodes for which the `ConnectionState` is `In`. + /// Maximum allowed number of slot-occupying nodes for which the `MembershipState` is `In`. max_in: u32, - /// Maximum allowed number of slot-occupying nodes for which the `ConnectionState` is `Out`. + /// Maximum allowed number of slot-occupying nodes for which the `MembershipState` is `Out`. max_out: u32, /// List of node identities (discovered or not) that don't occupy slots. @@ -67,35 +90,37 @@ pub struct PeersState { } /// State of a single node that we know about. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] struct Node { - /// Whether we are connected to this node. - connection_state: ConnectionState, + /// List of sets the node belongs to. + /// Always has a fixed size equal to the one of [`PeersState::set`]. The various possible sets + /// are indices into this `Vec`. + sets: Vec, /// Reputation value of the node, between `i32::min_value` (we hate that node) and /// `i32::max_value` (we love that node). reputation: i32, } -impl Default for Node { - fn default() -> Node { +impl Node { + fn new(num_sets: usize) -> Node { Node { - connection_state: ConnectionState::NotConnected { - last_connected: Instant::now(), - }, + sets: (0..num_sets).map(|_| MembershipState::NotMember).collect(), reputation: 0, } } } -/// Whether we are connected to a node. +/// Whether we are connected to a node in the context of a specific set. 
#[derive(Debug, Copy, Clone, PartialEq, Eq)] -enum ConnectionState { +enum MembershipState { + /// Node isn't part of that set. + NotMember, /// We are connected through an ingoing connection. In, /// We are connected through an outgoing connection. Out, - /// We are not connected to this node. + /// Node is part of that set, but we are not connected to it. NotConnected { /// When we were last connected to the node, or if we were never connected when we /// discovered it. @@ -103,50 +128,87 @@ enum ConnectionState { }, } -impl ConnectionState { +impl MembershipState { /// Returns `true` for `In` and `Out`. fn is_connected(self) -> bool { match self { - ConnectionState::In => true, - ConnectionState::Out => true, - ConnectionState::NotConnected { .. } => false, + MembershipState::NotMember => false, + MembershipState::In => true, + MembershipState::Out => true, + MembershipState::NotConnected { .. } => false, } } } impl PeersState { /// Builds a new empty `PeersState`. - pub fn new(in_peers: u32, out_peers: u32) -> Self { + pub fn new(sets: impl IntoIterator) -> Self { PeersState { nodes: HashMap::new(), - num_in: 0, - num_out: 0, - max_in: in_peers, - max_out: out_peers, - no_slot_nodes: HashSet::new(), + sets: sets + .into_iter() + .map(|config| SetInfo { + num_in: 0, + num_out: 0, + max_in: config.in_peers, + max_out: config.out_peers, + no_slot_nodes: HashSet::new(), + }) + .collect(), } } - /// Returns an object that grants access to the state of a peer. - pub fn peer<'a>(&'a mut self, peer_id: &'a PeerId) -> Peer<'a> { - match self.nodes.get_mut(peer_id) { - None => return Peer::Unknown(UnknownPeer { + /// Returns the number of sets. + /// + /// Corresponds to the number of elements passed to [`PeersState::new`]. + pub fn num_sets(&self) -> usize { + self.sets.len() + } + + /// Returns an object that grants access to the reputation value of a peer. + pub fn peer_reputation(&mut self, peer_id: PeerId) -> Reputation { + if !self.nodes.contains_key(&peer_id) { + self.nodes.insert(peer_id.clone(), Node::new(self.sets.len())); + } + + let entry = match self.nodes.entry(peer_id) { + Entry::Vacant(_) => unreachable!("guaranteed to be inserted above; qed"), + Entry::Occupied(e) => e, + }; + + Reputation { node: Some(entry) } + } + + /// Returns an object that grants access to the state of a peer in the context of a specific + /// set. + /// + /// # Panic + /// + /// `set` must be within range of the sets passed to [`PeersState::new`]. + /// + pub fn peer<'a>(&'a mut self, set: usize, peer_id: &'a PeerId) -> Peer<'a> { + // The code below will panic anyway if this happens to be false, but this earlier assert + // makes it explicit what is wrong. + assert!(set < self.sets.len()); + + match self.nodes.get_mut(peer_id).map(|p| &p.sets[set]) { + None | Some(MembershipState::NotMember) => Peer::Unknown(UnknownPeer { parent: self, + set, peer_id: Cow::Borrowed(peer_id), }), - Some(peer) => { - if peer.connection_state.is_connected() { - Peer::Connected(ConnectedPeer { - state: self, - peer_id: Cow::Borrowed(peer_id), - }) - } else { - Peer::NotConnected(NotConnectedPeer { - state: self, - peer_id: Cow::Borrowed(peer_id), - }) - } + Some(MembershipState::In) | Some(MembershipState::Out) => { + Peer::Connected(ConnectedPeer { + state: self, + set, + peer_id: Cow::Borrowed(peer_id), + }) } + Some(MembershipState::NotConnected { .. 
}) => Peer::NotConnected(NotConnectedPeer { + state: self, + set, + peer_id: Cow::Borrowed(peer_id), + }), } } @@ -157,22 +219,49 @@ impl PeersState { self.nodes.keys() } - /// Returns the list of peers we are connected to. + /// Returns the list of peers we are connected to in the context of a specific set. + /// + /// # Panic + /// + /// `set` must be within range of the sets passed to [`PeersState::new`]. + /// // Note: this method could theoretically return a `ConnectedPeer`, but implementing that // isn't simple. - pub fn connected_peers(&self) -> impl Iterator { - self.nodes.iter() - .filter(|(_, p)| p.connection_state.is_connected()) + pub fn connected_peers(&self, set: usize) -> impl Iterator { + // The code below will panic anyway if this happens to be false, but this earlier assert + // makes it explicit what is wrong. + assert!(set < self.sets.len()); + + self.nodes + .iter() + .filter(move |(_, p)| p.sets[set].is_connected()) .map(|(p, _)| p) } /// Returns the peer with the highest reputation and that we are not connected to. /// /// If multiple nodes have the same reputation, which one is returned is unspecified. - pub fn highest_not_connected_peer(&mut self) -> Option { - let outcome = self.nodes + /// + /// # Panic + /// + /// `set` must be within range of the sets passed to [`PeersState::new`]. + /// + pub fn highest_not_connected_peer(&mut self, set: usize) -> Option { + // The code below will panic anyway if this happens to be false, but this earlier assert + // makes it explicit what is wrong. + assert!(set < self.sets.len()); + + let outcome = self + .nodes .iter_mut() - .filter(|(_, Node { connection_state, .. })| !connection_state.is_connected()) + .filter(|(_, Node { sets, .. })| { + match sets[set] { + MembershipState::NotMember => false, + MembershipState::In => false, + MembershipState::Out => false, + MembershipState::NotConnected { .. } => true, + } + }) .fold(None::<(&PeerId, &mut Node)>, |mut cur_node, to_try| { if let Some(cur_node) = cur_node.take() { if cur_node.1.reputation >= to_try.1.reputation { @@ -186,6 +275,7 @@ impl PeersState { if let Some(peer_id) = outcome { Some(NotConnectedPeer { state: self, + set, peer_id: Cow::Owned(peer_id), }) } else { @@ -195,48 +285,48 @@ impl PeersState { /// Add a node to the list of nodes that don't occupy slots. /// - /// Has no effect if the peer was already in the group. - pub fn add_no_slot_node(&mut self, peer_id: PeerId) { + /// Has no effect if the node was already in the group. + pub fn add_no_slot_node(&mut self, set: usize, peer_id: PeerId) { // Reminder: `HashSet::insert` returns false if the node was already in the set - if !self.no_slot_nodes.insert(peer_id.clone()) { + if !self.sets[set].no_slot_nodes.insert(peer_id.clone()) { return; } if let Some(peer) = self.nodes.get_mut(&peer_id) { - match peer.connection_state { - ConnectionState::In => self.num_in -= 1, - ConnectionState::Out => self.num_out -= 1, - ConnectionState::NotConnected { .. } => {}, + match peer.sets[set] { + MembershipState::In => self.sets[set].num_in -= 1, + MembershipState::Out => self.sets[set].num_out -= 1, + MembershipState::NotConnected { .. } | MembershipState::NotMember => {} } } } /// Removes a node from the list of nodes that don't occupy slots. /// - /// Has no effect if the peer was not in the group. - pub fn remove_no_slot_node(&mut self, peer_id: &PeerId) { + /// Has no effect if the node was not in the group. 
+ pub fn remove_no_slot_node(&mut self, set: usize, peer_id: &PeerId) { // Reminder: `HashSet::remove` returns false if the node was already not in the set - if !self.no_slot_nodes.remove(peer_id) { + if !self.sets[set].no_slot_nodes.remove(peer_id) { return; } if let Some(peer) = self.nodes.get_mut(peer_id) { - match peer.connection_state { - ConnectionState::In => self.num_in += 1, - ConnectionState::Out => self.num_out += 1, - ConnectionState::NotConnected { .. } => {}, + match peer.sets[set] { + MembershipState::In => self.sets[set].num_in += 1, + MembershipState::Out => self.sets[set].num_out += 1, + MembershipState::NotConnected { .. } | MembershipState::NotMember => {} } } } } -/// Grants access to the state of a peer in the `PeersState`. +/// Grants access to the state of a peer in the [`PeersState`] in the context of a specific set. pub enum Peer<'a> { /// We are connected to this node. Connected(ConnectedPeer<'a>), /// We are not connected to this node. NotConnected(NotConnectedPeer<'a>), - /// We have never heard of this node. + /// We have never heard of this node, or it is not part of the set. Unknown(UnknownPeer<'a>), } @@ -253,7 +343,7 @@ impl<'a> Peer<'a> { /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` /// otherwise. - #[cfg(test)] // Feel free to remove this if this function is needed outside of tests + #[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn into_not_connected(self) -> Option> { match self { Peer::Connected(_) => None, @@ -264,7 +354,7 @@ impl<'a> Peer<'a> { /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` /// otherwise. - #[cfg(test)] // Feel free to remove this if this function is needed outside of tests + #[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn into_unknown(self) -> Option> { match self { Peer::Connected(_) => None, @@ -277,10 +367,16 @@ impl<'a> Peer<'a> { /// A peer that is connected to us. pub struct ConnectedPeer<'a> { state: &'a mut PeersState, + set: usize, peer_id: Cow<'a, PeerId>, } impl<'a> ConnectedPeer<'a> { + /// Get the `PeerId` associated to this `ConnectedPeer`. + pub fn peer_id(&self) -> &PeerId { + &self.peer_id + } + /// Destroys this `ConnectedPeer` and returns the `PeerId` inside of it. pub fn into_peer_id(self) -> PeerId { self.peer_id.into_owned() @@ -288,65 +384,74 @@ impl<'a> ConnectedPeer<'a> { /// Switches the peer to "not connected". pub fn disconnect(self) -> NotConnectedPeer<'a> { - let is_no_slot_occupy = self.state.no_slot_nodes.contains(&*self.peer_id); - if let Some(mut node) = self.state.nodes.get_mut(&*self.peer_id) { + let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); + if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { if !is_no_slot_occupy { - match node.connection_state { - ConnectionState::In => self.state.num_in -= 1, - ConnectionState::Out => self.state.num_out -= 1, - ConnectionState::NotConnected { .. } => - debug_assert!(false, "State inconsistency: disconnecting a disconnected node") + match node.sets[self.set] { + MembershipState::In => self.state.sets[self.set].num_in -= 1, + MembershipState::Out => self.state.sets[self.set].num_out -= 1, + MembershipState::NotMember | MembershipState::NotConnected { .. 
} => { + debug_assert!( + false, + "State inconsistency: disconnecting a disconnected node" + ) + } } } - node.connection_state = ConnectionState::NotConnected { + node.sets[self.set] = MembershipState::NotConnected { last_connected: Instant::now(), }; } else { - debug_assert!(false, "State inconsistency: disconnecting a disconnected node"); + debug_assert!( + false, + "State inconsistency: disconnecting a disconnected node" + ); } NotConnectedPeer { state: self.state, + set: self.set, peer_id: self.peer_id, } } - /// Returns the reputation value of the node. - pub fn reputation(&self) -> i32 { - self.state.nodes.get(&*self.peer_id).map_or(0, |p| p.reputation) - } - - /// Sets the reputation of the peer. - pub fn set_reputation(&mut self, value: i32) { - if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { - node.reputation = value; - } else { - debug_assert!(false, "State inconsistency: set_reputation on an unknown node"); - } - } - /// Performs an arithmetic addition on the reputation score of that peer. /// /// In case of overflow, the value will be capped. + /// + /// > **Note**: Reputation values aren't specific to a set but are global per peer. pub fn add_reputation(&mut self, modifier: i32) { if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { node.reputation = node.reputation.saturating_add(modifier); } else { - debug_assert!(false, "State inconsistency: add_reputation on an unknown node"); + debug_assert!( + false, + "State inconsistency: add_reputation on an unknown node" + ); } } + + /// Returns the reputation value of the node. + /// + /// > **Note**: Reputation values aren't specific to a set but are global per peer. + pub fn reputation(&self) -> i32 { + self.state + .nodes + .get(&*self.peer_id) + .map_or(0, |p| p.reputation) + } } /// A peer that is not connected to us. #[derive(Debug)] pub struct NotConnectedPeer<'a> { state: &'a mut PeersState, + set: usize, peer_id: Cow<'a, PeerId>, } impl<'a> NotConnectedPeer<'a> { /// Destroys this `NotConnectedPeer` and returns the `PeerId` inside of it. - #[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn into_peer_id(self) -> PeerId { self.peer_id.into_owned() } @@ -359,7 +464,7 @@ impl<'a> NotConnectedPeer<'a> { None => return, }; - if let ConnectionState::NotConnected { last_connected } = &mut state.connection_state { + if let MembershipState::NotConnected { last_connected } = &mut state.sets[self.set] { *last_connected = Instant::now(); } } @@ -381,8 +486,8 @@ impl<'a> NotConnectedPeer<'a> { } }; - match state.connection_state { - ConnectionState::NotConnected { last_connected } => last_connected, + match state.sets[self.set] { + MembershipState::NotConnected { last_connected } => last_connected, _ => { error!(target: "peerset", "State inconsistency with {}", self.peer_id); Instant::now() @@ -397,25 +502,31 @@ impl<'a> NotConnectedPeer<'a> { /// /// Non-slot-occupying nodes don't count towards the number of slots. pub fn try_outgoing(self) -> Result, NotConnectedPeer<'a>> { - let is_no_slot_occupy = self.state.no_slot_nodes.contains(&*self.peer_id); + let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); // Note that it is possible for num_out to be strictly superior to the max, in case we were // connected to reserved node then marked them as not reserved. 
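The comment above is the one subtle part of the slot accounting: a connected no-slot (reserved) peer that is later un-marked starts counting again, so `num_out` can temporarily exceed `max_out`. A hedged sketch of that sequence, reusing the single-slot configuration from the unit tests in this file:

    use libp2p::PeerId;

    fn unmarking_a_connected_reserved_peer_overflows_the_slots() {
        let mut peers_state =
            PeersState::new(std::iter::once(SetConfig { in_peers: 1, out_peers: 1 }));
        let reserved = PeerId::random();
        let other = PeerId::random();

        // The no-slot peer connects without consuming the single outgoing slot…
        peers_state.add_no_slot_node(0, reserved.clone());
        peers_state.peer(0, &reserved).into_unknown().unwrap().discover().try_outgoing().unwrap();

        // …so another peer can still take that slot.
        peers_state.peer(0, &other).into_unknown().unwrap().discover().try_outgoing().unwrap();

        // Un-marking the still-connected peer makes it count again, so `num_out` now
        // sits at 2 while `max_out` is 1; further slot-occupying `try_outgoing` calls
        // are refused until one of the connections goes away.
        peers_state.remove_no_slot_node(0, &reserved);
    }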
- if self.state.num_out >= self.state.max_out && !is_no_slot_occupy { + if self.state.sets[self.set].num_out >= self.state.sets[self.set].max_out + && !is_no_slot_occupy + { return Err(self); } - if let Some(mut peer) = self.state.nodes.get_mut(&*self.peer_id) { - peer.connection_state = ConnectionState::Out; + if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { + peer.sets[self.set] = MembershipState::Out; if !is_no_slot_occupy { - self.state.num_out += 1; + self.state.sets[self.set].num_out += 1; } } else { - debug_assert!(false, "State inconsistency: try_outgoing on an unknown node"); + debug_assert!( + false, + "State inconsistency: try_outgoing on an unknown node" + ); } Ok(ConnectedPeer { state: self.state, + set: self.set, peer_id: self.peer_id, }) } @@ -427,74 +538,95 @@ impl<'a> NotConnectedPeer<'a> { /// /// Non-slot-occupying nodes don't count towards the number of slots. pub fn try_accept_incoming(self) -> Result, NotConnectedPeer<'a>> { - let is_no_slot_occupy = self.state.no_slot_nodes.contains(&*self.peer_id); + let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); // Note that it is possible for num_in to be strictly superior to the max, in case we were // connected to reserved node then marked them as not reserved. - if self.state.num_in >= self.state.max_in && !is_no_slot_occupy { + if self.state.sets[self.set].num_in >= self.state.sets[self.set].max_in + && !is_no_slot_occupy + { return Err(self); } - if let Some(mut peer) = self.state.nodes.get_mut(&*self.peer_id) { - peer.connection_state = ConnectionState::In; + if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { + peer.sets[self.set] = MembershipState::In; if !is_no_slot_occupy { - self.state.num_in += 1; + self.state.sets[self.set].num_in += 1; } } else { - debug_assert!(false, "State inconsistency: try_accept_incoming on an unknown node"); + debug_assert!( + false, + "State inconsistency: try_accept_incoming on an unknown node" + ); } Ok(ConnectedPeer { state: self.state, + set: self.set, peer_id: self.peer_id, }) } /// Returns the reputation value of the node. + /// + /// > **Note**: Reputation values aren't specific to a set but are global per peer. pub fn reputation(&self) -> i32 { - self.state.nodes.get(&*self.peer_id).map_or(0, |p| p.reputation) + self.state + .nodes + .get(&*self.peer_id) + .map_or(0, |p| p.reputation) } /// Sets the reputation of the peer. + /// + /// > **Note**: Reputation values aren't specific to a set but are global per peer. + #[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn set_reputation(&mut self, value: i32) { if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { node.reputation = value; } else { - debug_assert!(false, "State inconsistency: set_reputation on an unknown node"); - } - } - - /// Performs an arithmetic addition on the reputation score of that peer. - /// - /// In case of overflow, the value will be capped. - pub fn add_reputation(&mut self, modifier: i32) { - if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { - node.reputation = node.reputation.saturating_add(modifier); - } else { - debug_assert!(false, "State inconsistency: add_reputation on an unknown node"); + debug_assert!( + false, + "State inconsistency: set_reputation on an unknown node" + ); } } - /// Un-discovers the peer. Removes it from the list. + /// Removes the peer from the list of members of the set. 
pub fn forget_peer(self) -> UnknownPeer<'a> { - if self.state.nodes.remove(&*self.peer_id).is_none() { + if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { + debug_assert!(!matches!(peer.sets[self.set], MembershipState::NotMember)); + peer.sets[self.set] = MembershipState::NotMember; + + // Remove the peer from `self.state.nodes` entirely if it isn't a member of any set. + if peer.reputation == 0 && peer + .sets + .iter() + .all(|set| matches!(set, MembershipState::NotMember)) + { + self.state.nodes.remove(&*self.peer_id); + } + } else { + debug_assert!(false, "State inconsistency: forget_peer on an unknown node"); error!( target: "peerset", "State inconsistency with {} when forgetting peer", self.peer_id ); - } + }; UnknownPeer { parent: self.state, + set: self.set, peer_id: self.peer_id, } } } -/// A peer that we have never heard of. +/// A peer that we have never heard of or that isn't part of the set. pub struct UnknownPeer<'a> { parent: &'a mut PeersState, + set: usize, peer_id: Cow<'a, PeerId>, } @@ -504,96 +636,240 @@ impl<'a> UnknownPeer<'a> { /// The node starts with a reputation of 0. You can adjust these default /// values using the `NotConnectedPeer` that this method returns. pub fn discover(self) -> NotConnectedPeer<'a> { - self.parent.nodes.insert(self.peer_id.clone().into_owned(), Node { - connection_state: ConnectionState::NotConnected { - last_connected: Instant::now(), - }, - reputation: 0, - }); + let num_sets = self.parent.sets.len(); + + self.parent + .nodes + .entry(self.peer_id.clone().into_owned()) + .or_insert_with(|| Node::new(num_sets)) + .sets[self.set] = MembershipState::NotConnected { + last_connected: Instant::now(), + }; - let state = self.parent; NotConnectedPeer { - state, + state: self.parent, + set: self.set, peer_id: self.peer_id, } } } +/// Access to the reputation of a peer. +pub struct Reputation<'a> { + /// Node entry in [`PeersState::nodes`]. Always `Some` except right before dropping. + node: Option>, +} + +impl<'a> Reputation<'a> { + /// Returns the reputation value of the node. + pub fn reputation(&self) -> i32 { + self.node.as_ref().unwrap().get().reputation + } + + /// Sets the reputation of the peer. + pub fn set_reputation(&mut self, value: i32) { + self.node.as_mut().unwrap().get_mut().reputation = value; + } + + /// Performs an arithmetic addition on the reputation score of that peer. + /// + /// In case of overflow, the value will be capped. 
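`Reputation` is a short-lived guard around the peer's entry: `peer_reputation` above hands it out even for peers that are not a member of any set, and the `Drop` implementation below garbage-collects the placeholder entry once the reputation returns to zero. A small sketch of that lifecycle, assuming a freshly inserted node starts as `NotMember` of every set (as the rest of this module implies):

    use libp2p::PeerId;

    fn reputation_guard_garbage_collects() {
        let mut peers_state =
            PeersState::new(std::iter::once(SetConfig { in_peers: 1, out_peers: 1 }));
        let id = PeerId::random();

        // Reporting a reputation for a peer we have never heard of creates a placeholder entry.
        peers_state.peer_reputation(id.clone()).add_reputation(50);
        assert_eq!(peers_state.peer_reputation(id.clone()).reputation(), 50);

        // Setting it back to zero lets `Drop` remove the entry again, because the
        // peer is not a member of any set; afterwards it is reported as unknown.
        peers_state.peer_reputation(id.clone()).set_reputation(0);
        assert!(matches!(peers_state.peer(0, &id), Peer::Unknown(_)));
    }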
+ pub fn add_reputation(&mut self, modifier: i32) { + let reputation = &mut self.node.as_mut().unwrap().get_mut().reputation; + *reputation = reputation.saturating_add(modifier); + } +} + +impl<'a> Drop for Reputation<'a> { + fn drop(&mut self) { + if let Some(node) = self.node.take() { + if node.get().reputation == 0 && + node.get().sets.iter().all(|set| matches!(set, MembershipState::NotMember)) + { + node.remove(); + } + } + } +} + #[cfg(test)] mod tests { - use super::{PeersState, Peer}; + use super::{Peer, PeersState, SetConfig}; use libp2p::PeerId; + use std::iter; #[test] fn full_slots_in() { - let mut peers_state = PeersState::new(1, 1); + let mut peers_state = PeersState::new(iter::once(SetConfig { + in_peers: 1, + out_peers: 1, + })); let id1 = PeerId::random(); let id2 = PeerId::random(); - if let Peer::Unknown(e) = peers_state.peer(&id1) { + if let Peer::Unknown(e) = peers_state.peer(0, &id1) { assert!(e.discover().try_accept_incoming().is_ok()); } - if let Peer::Unknown(e) = peers_state.peer(&id2) { + if let Peer::Unknown(e) = peers_state.peer(0, &id2) { assert!(e.discover().try_accept_incoming().is_err()); } } #[test] fn no_slot_node_doesnt_use_slot() { - let mut peers_state = PeersState::new(1, 1); + let mut peers_state = PeersState::new(iter::once(SetConfig { + in_peers: 1, + out_peers: 1, + })); let id1 = PeerId::random(); let id2 = PeerId::random(); - peers_state.add_no_slot_node(id1.clone()); - if let Peer::Unknown(p) = peers_state.peer(&id1) { + peers_state.add_no_slot_node(0, id1.clone()); + if let Peer::Unknown(p) = peers_state.peer(0, &id1) { assert!(p.discover().try_accept_incoming().is_ok()); - } else { panic!() } + } else { + panic!() + } - if let Peer::Unknown(e) = peers_state.peer(&id2) { + if let Peer::Unknown(e) = peers_state.peer(0, &id2) { assert!(e.discover().try_accept_incoming().is_ok()); - } else { panic!() } + } else { + panic!() + } } #[test] fn disconnecting_frees_slot() { - let mut peers_state = PeersState::new(1, 1); + let mut peers_state = PeersState::new(iter::once(SetConfig { + in_peers: 1, + out_peers: 1, + })); let id1 = PeerId::random(); let id2 = PeerId::random(); - assert!(peers_state.peer(&id1).into_unknown().unwrap().discover().try_accept_incoming().is_ok()); - assert!(peers_state.peer(&id2).into_unknown().unwrap().discover().try_accept_incoming().is_err()); - peers_state.peer(&id1).into_connected().unwrap().disconnect(); - assert!(peers_state.peer(&id2).into_not_connected().unwrap().try_accept_incoming().is_ok()); + assert!(peers_state + .peer(0, &id1) + .into_unknown() + .unwrap() + .discover() + .try_accept_incoming() + .is_ok()); + assert!(peers_state + .peer(0, &id2) + .into_unknown() + .unwrap() + .discover() + .try_accept_incoming() + .is_err()); + peers_state + .peer(0, &id1) + .into_connected() + .unwrap() + .disconnect(); + assert!(peers_state + .peer(0, &id2) + .into_not_connected() + .unwrap() + .try_accept_incoming() + .is_ok()); } #[test] fn highest_not_connected_peer() { - let mut peers_state = PeersState::new(25, 25); + let mut peers_state = PeersState::new(iter::once(SetConfig { + in_peers: 25, + out_peers: 25, + })); let id1 = PeerId::random(); let id2 = PeerId::random(); - assert!(peers_state.highest_not_connected_peer().is_none()); - peers_state.peer(&id1).into_unknown().unwrap().discover().set_reputation(50); - peers_state.peer(&id2).into_unknown().unwrap().discover().set_reputation(25); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id1.clone())); - 
peers_state.peer(&id2).into_not_connected().unwrap().set_reputation(75); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id2.clone())); - peers_state.peer(&id2).into_not_connected().unwrap().try_accept_incoming().unwrap(); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id1.clone())); - peers_state.peer(&id1).into_not_connected().unwrap().set_reputation(100); - peers_state.peer(&id2).into_connected().unwrap().disconnect(); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id1.clone())); - peers_state.peer(&id1).into_not_connected().unwrap().set_reputation(-100); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id2.clone())); + assert!(peers_state.highest_not_connected_peer(0).is_none()); + peers_state + .peer(0, &id1) + .into_unknown() + .unwrap() + .discover() + .set_reputation(50); + peers_state + .peer(0, &id2) + .into_unknown() + .unwrap() + .discover() + .set_reputation(25); + assert_eq!( + peers_state + .highest_not_connected_peer(0) + .map(|p| p.into_peer_id()), + Some(id1.clone()) + ); + peers_state + .peer(0, &id2) + .into_not_connected() + .unwrap() + .set_reputation(75); + assert_eq!( + peers_state + .highest_not_connected_peer(0) + .map(|p| p.into_peer_id()), + Some(id2.clone()) + ); + peers_state + .peer(0, &id2) + .into_not_connected() + .unwrap() + .try_accept_incoming() + .unwrap(); + assert_eq!( + peers_state + .highest_not_connected_peer(0) + .map(|p| p.into_peer_id()), + Some(id1.clone()) + ); + peers_state + .peer(0, &id1) + .into_not_connected() + .unwrap() + .set_reputation(100); + peers_state + .peer(0, &id2) + .into_connected() + .unwrap() + .disconnect(); + assert_eq!( + peers_state + .highest_not_connected_peer(0) + .map(|p| p.into_peer_id()), + Some(id1.clone()) + ); + peers_state + .peer(0, &id1) + .into_not_connected() + .unwrap() + .set_reputation(-100); + assert_eq!( + peers_state + .highest_not_connected_peer(0) + .map(|p| p.into_peer_id()), + Some(id2.clone()) + ); } #[test] fn disconnect_no_slot_doesnt_panic() { - let mut peers_state = PeersState::new(1, 1); + let mut peers_state = PeersState::new(iter::once(SetConfig { + in_peers: 1, + out_peers: 1, + })); let id = PeerId::random(); - peers_state.add_no_slot_node(id.clone()); - let peer = peers_state.peer(&id).into_unknown().unwrap().discover().try_outgoing().unwrap(); + peers_state.add_no_slot_node(0, id.clone()); + let peer = peers_state + .peer(0, &id) + .into_unknown() + .unwrap() + .discover() + .try_outgoing() + .unwrap(); peer.disconnect(); } } diff --git a/client/peerset/tests/fuzz.rs b/client/peerset/tests/fuzz.rs index 6fa29e3d834cf..8f64962943477 100644 --- a/client/peerset/tests/fuzz.rs +++ b/client/peerset/tests/fuzz.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,8 +20,8 @@ use futures::prelude::*; use libp2p::PeerId; use rand::distributions::{Distribution, Uniform, WeightedIndex}; use rand::seq::IteratorRandom; -use std::{collections::HashMap, collections::HashSet, iter, pin::Pin, task::Poll}; -use sc_peerset::{IncomingIndex, Message, PeersetConfig, Peerset, ReputationChange}; +use sc_peerset::{DropReason, IncomingIndex, Message, Peerset, PeersetConfig, ReputationChange, SetConfig, SetId}; +use std::{collections::HashMap, collections::HashSet, pin::Pin, task::Poll}; #[test] fn run() { @@ -40,23 +40,30 @@ fn test_once() { let mut reserved_nodes = HashSet::::new(); let (mut peerset, peerset_handle) = Peerset::from_config(PeersetConfig { - bootnodes: (0 .. Uniform::new_inclusive(0, 4).sample(&mut rng)).map(|_| { - let id = PeerId::random(); - known_nodes.insert(id.clone()); - id - }).collect(), - priority_groups: { - let nodes = (0 .. Uniform::new_inclusive(0, 2).sample(&mut rng)).map(|_| { - let id = PeerId::random(); - known_nodes.insert(id.clone()); - reserved_nodes.insert(id.clone()); - id - }).collect(); - vec![("foo".to_string(), nodes)] - }, - reserved_only: Uniform::new_inclusive(0, 10).sample(&mut rng) == 0, - in_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), - out_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + sets: vec![ + SetConfig { + bootnodes: (0..Uniform::new_inclusive(0, 4).sample(&mut rng)) + .map(|_| { + let id = PeerId::random(); + known_nodes.insert(id.clone()); + id + }) + .collect(), + reserved_nodes: { + (0..Uniform::new_inclusive(0, 2).sample(&mut rng)) + .map(|_| { + let id = PeerId::random(); + known_nodes.insert(id.clone()); + reserved_nodes.insert(id.clone()); + id + }) + .collect() + }, + in_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + out_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + reserved_only: Uniform::new_inclusive(0, 10).sample(&mut rng) == 0, + }, + ], }); futures::executor::block_on(futures::future::poll_fn(move |cx| { @@ -71,70 +78,101 @@ fn test_once() { // Perform a certain number of actions while checking that the state is consistent. If we // reach the end of the loop, the run has succeeded. - for _ in 0 .. 2500 { + for _ in 0..2500 { // Each of these weights corresponds to an action that we may perform. let action_weights = [150, 90, 90, 30, 30, 1, 1, 4, 4]; - match WeightedIndex::new(&action_weights).unwrap().sample(&mut rng) { + match WeightedIndex::new(&action_weights) + .unwrap() + .sample(&mut rng) + { // If we generate 0, poll the peerset. 0 => match Stream::poll_next(Pin::new(&mut peerset), cx) { - Poll::Ready(Some(Message::Connect(id))) => { - if let Some(id) = incoming_nodes.iter().find(|(_, v)| **v == id).map(|(&id, _)| id) { + Poll::Ready(Some(Message::Connect { peer_id, .. })) => { + if let Some(id) = incoming_nodes + .iter() + .find(|(_, v)| **v == peer_id) + .map(|(&id, _)| id) + { incoming_nodes.remove(&id); } - assert!(connected_nodes.insert(id)); + assert!(connected_nodes.insert(peer_id)); + } + Poll::Ready(Some(Message::Drop { peer_id, .. 
})) => { + connected_nodes.remove(&peer_id); + } + Poll::Ready(Some(Message::Accept(n))) => { + assert!(connected_nodes.insert(incoming_nodes.remove(&n).unwrap())) + } + Poll::Ready(Some(Message::Reject(n))) => { + assert!(!connected_nodes.contains(&incoming_nodes.remove(&n).unwrap())) } - Poll::Ready(Some(Message::Drop(id))) => { connected_nodes.remove(&id); } - Poll::Ready(Some(Message::Accept(n))) => - assert!(connected_nodes.insert(incoming_nodes.remove(&n).unwrap())), - Poll::Ready(Some(Message::Reject(n))) => - assert!(!connected_nodes.contains(&incoming_nodes.remove(&n).unwrap())), Poll::Ready(None) => panic!(), Poll::Pending => {} - } + }, // If we generate 1, discover a new node. 1 => { let new_id = PeerId::random(); known_nodes.insert(new_id.clone()); - peerset.discovered(iter::once(new_id)); + peerset.add_to_peers_set(SetId::from(0), new_id); } // If we generate 2, adjust a random reputation. - 2 => if let Some(id) = known_nodes.iter().choose(&mut rng) { - let val = Uniform::new_inclusive(i32::min_value(), i32::max_value()).sample(&mut rng); - peerset_handle.report_peer(id.clone(), ReputationChange::new(val, "")); + 2 => { + if let Some(id) = known_nodes.iter().choose(&mut rng) { + let val = Uniform::new_inclusive(i32::min_value(), i32::max_value()) + .sample(&mut rng); + peerset_handle.report_peer(id.clone(), ReputationChange::new(val, "")); + } } // If we generate 3, disconnect from a random node. - 3 => if let Some(id) = connected_nodes.iter().choose(&mut rng).cloned() { - connected_nodes.remove(&id); - peerset.dropped(id); + 3 => { + if let Some(id) = connected_nodes.iter().choose(&mut rng).cloned() { + connected_nodes.remove(&id); + peerset.dropped(SetId::from(0), id, DropReason::Unknown); + } } // If we generate 4, connect to a random node. - 4 => if let Some(id) = known_nodes.iter() - .filter(|n| incoming_nodes.values().all(|m| m != *n) && !connected_nodes.contains(*n)) - .choose(&mut rng) { - peerset.incoming(id.clone(), next_incoming_id.clone()); - incoming_nodes.insert(next_incoming_id.clone(), id.clone()); - next_incoming_id.0 += 1; + 4 => { + if let Some(id) = known_nodes + .iter() + .filter(|n| { + incoming_nodes.values().all(|m| m != *n) + && !connected_nodes.contains(*n) + }) + .choose(&mut rng) + { + peerset.incoming(SetId::from(0), id.clone(), next_incoming_id.clone()); + incoming_nodes.insert(next_incoming_id.clone(), id.clone()); + next_incoming_id.0 += 1; + } } // 5 and 6 are the reserved-only mode. - 5 => peerset_handle.set_reserved_only(true), - 6 => peerset_handle.set_reserved_only(false), + 5 => peerset_handle.set_reserved_only(SetId::from(0), true), + 6 => peerset_handle.set_reserved_only(SetId::from(0), false), // 7 and 8 are about switching a random node in or out of reserved mode. 
- 7 => if let Some(id) = known_nodes.iter().filter(|n| !reserved_nodes.contains(*n)).choose(&mut rng) { - peerset_handle.add_reserved_peer(id.clone()); - reserved_nodes.insert(id.clone()); + 7 => { + if let Some(id) = known_nodes + .iter() + .filter(|n| !reserved_nodes.contains(*n)) + .choose(&mut rng) + { + peerset_handle.add_reserved_peer(SetId::from(0), id.clone()); + reserved_nodes.insert(id.clone()); + } } - 8 => if let Some(id) = reserved_nodes.iter().choose(&mut rng).cloned() { - reserved_nodes.remove(&id); - peerset_handle.remove_reserved_peer(id); + 8 => { + if let Some(id) = reserved_nodes.iter().choose(&mut rng).cloned() { + reserved_nodes.remove(&id); + peerset_handle.remove_reserved_peer(SetId::from(0), id); + } } - _ => unreachable!() + _ => unreachable!(), } } diff --git a/client/proposer-metrics/Cargo.toml b/client/proposer-metrics/Cargo.toml index 085f50f5be551..ffe5045461f77 100644 --- a/client/proposer-metrics/Cargo.toml +++ b/client/proposer-metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-proposer-metrics" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,4 +14,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} diff --git a/client/proposer-metrics/src/lib.rs b/client/proposer-metrics/src/lib.rs index 50498d40b62d5..8fec9779de472 100644 --- a/client/proposer-metrics/src/lib.rs +++ b/client/proposer-metrics/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Prometheus basic proposer metrics. 
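For reference, the shape of the new sets-based configuration that the fuzz test above constructs, reduced to a minimal sketch using the public `sc_peerset` types it imports (the empty bootnode/reserved collections are just placeholders):

    use libp2p::PeerId;
    use sc_peerset::{Peerset, PeersetConfig, ReputationChange, SetConfig, SetId};

    fn single_set_peerset() {
        let (_peerset, handle) = Peerset::from_config(PeersetConfig {
            sets: vec![SetConfig {
                in_peers: 25,
                out_peers: 25,
                bootnodes: Default::default(),
                reserved_nodes: Default::default(),
                reserved_only: false,
            }],
        });

        // Per-set operations now take an explicit `SetId`; the fuzzer only ever uses set 0.
        handle.set_reserved_only(SetId::from(0), false);
        handle.add_reserved_peer(SetId::from(0), PeerId::random());
        // Reputation reports stay global to the peer and carry no `SetId`.
        handle.report_peer(PeerId::random(), ReputationChange::new(-10, "example"));
    }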
diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 55eb51d261cdb..d213decdbc779 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc-api" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,20 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } +codec = { package = "parity-scale-codec", version = "2.0.0" } derive_more = "0.99.2" futures = { version = "0.3.1", features = ["compat"] } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" -jsonrpc-pubsub = "15.0.0" +jsonrpc-core = "15.1.0" +jsonrpc-core-client = "15.1.0" +jsonrpc-derive = "15.1.0" +jsonrpc-pubsub = "15.1.0" log = "0.4.8" -parking_lot = "0.10.0" -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } -sp-runtime = { path = "../../primitives/runtime" , version = "2.0.0"} -sp-chain-spec = { path = "../../primitives/chain-spec" , version = "2.0.0"} +parking_lot = "0.11.1" +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-version = { version = "3.0.0", path = "../../primitives/version" } +sp-runtime = { path = "../../primitives/runtime" , version = "3.0.0"} +sp-chain-spec = { path = "../../primitives/chain-spec" , version = "3.0.0"} serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sp-rpc = { version = "2.0.0", path = "../../primitives/rpc" } +sp-transaction-pool = { version = "3.0.0", path = "../../primitives/transaction-pool" } +sp-rpc = { version = "3.0.0", path = "../../primitives/rpc" } diff --git a/client/rpc-api/src/author/error.rs b/client/rpc-api/src/author/error.rs index 69c036be95fe0..7c1086ab67d1b 100644 --- a/client/rpc-api/src/author/error.rs +++ b/client/rpc-api/src/author/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -99,6 +99,9 @@ const POOL_CYCLE_DETECTED: i64 = POOL_INVALID_TX + 5; const POOL_IMMEDIATELY_DROPPED: i64 = POOL_INVALID_TX + 6; /// The key type crypto is not known. const UNSUPPORTED_KEY_TYPE: i64 = POOL_INVALID_TX + 7; +/// The transaction was not included to the pool since it is unactionable, +/// it is not propagable and the local node does not author blocks. 
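The new error code documented above targets transactions that can neither be gossiped to peers nor end up in a locally authored block. A rough sketch of that gating decision, with hypothetical names (`propagate`, `authors_blocks`) that are not part of this change:

    /// Returns `true` when keeping the transaction would be pointless: it must not be
    /// propagated to peers and this node never builds blocks that could include it.
    fn is_unactionable(propagate: bool, authors_blocks: bool) -> bool {
        !propagate && !authors_blocks
    }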
+const POOL_UNACTIONABLE: i64 = POOL_INVALID_TX + 8; impl From for rpc::Error { fn from(e: Error) -> Self { @@ -158,6 +161,14 @@ impl From for rpc::Error { message: "Immediately Dropped".into(), data: Some("The transaction couldn't enter the pool because of the limit".into()), }, + Error::Pool(PoolError::Unactionable) => rpc::Error { + code: rpc::ErrorCode::ServerError(POOL_UNACTIONABLE), + message: "Unactionable".into(), + data: Some( + "The transaction is unactionable since it is not propagable and \ + the local node does not author blocks".into(), + ), + }, Error::UnsupportedKeyType => rpc::Error { code: rpc::ErrorCode::ServerError(UNSUPPORTED_KEY_TYPE), message: "Unknown key type crypto" .into(), diff --git a/client/rpc-api/src/author/hash.rs b/client/rpc-api/src/author/hash.rs index 4287af8ede596..618159a8ad4d5 100644 --- a/client/rpc-api/src/author/hash.rs +++ b/client/rpc-api/src/author/hash.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Extrinsic helpers for author RPC module. diff --git a/client/rpc-api/src/author/mod.rs b/client/rpc-api/src/author/mod.rs index 29f5b1d26e84c..6ccf1ebab375a 100644 --- a/client/rpc-api/src/author/mod.rs +++ b/client/rpc-api/src/author/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/chain/error.rs b/client/rpc-api/src/chain/error.rs index fd7bd0a43d778..59a0c0a2f840f 100644 --- a/client/rpc-api/src/chain/error.rs +++ b/client/rpc-api/src/chain/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/chain/mod.rs b/client/rpc-api/src/chain/mod.rs index 9bb75216c0186..5e2d484413047 100644 --- a/client/rpc-api/src/chain/mod.rs +++ b/client/rpc-api/src/chain/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/child_state/mod.rs b/client/rpc-api/src/child_state/mod.rs index d956a7554f8ee..7efff74225969 100644 --- a/client/rpc-api/src/child_state/mod.rs +++ b/client/rpc-api/src/child_state/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/errors.rs b/client/rpc-api/src/errors.rs index 4e1a5b10fc512..8e4883a4cc20c 100644 --- a/client/rpc-api/src/errors.rs +++ b/client/rpc-api/src/errors.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/helpers.rs b/client/rpc-api/src/helpers.rs index 025fef1102c49..e85c26062b50d 100644 --- a/client/rpc-api/src/helpers.rs +++ b/client/rpc-api/src/helpers.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/lib.rs b/client/rpc-api/src/lib.rs index 7bae75181056f..814319add2a3e 100644 --- a/client/rpc-api/src/lib.rs +++ b/client/rpc-api/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Substrate RPC interfaces. //! diff --git a/client/rpc-api/src/metadata.rs b/client/rpc-api/src/metadata.rs index cffcbf61f5440..efe090acc621e 100644 --- a/client/rpc-api/src/metadata.rs +++ b/client/rpc-api/src/metadata.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/offchain/error.rs b/client/rpc-api/src/offchain/error.rs index ea5223f1ce7f9..f74d419e54424 100644 --- a/client/rpc-api/src/offchain/error.rs +++ b/client/rpc-api/src/offchain/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/offchain/mod.rs b/client/rpc-api/src/offchain/mod.rs index 427b6a1cc017b..7a1f6db9e80be 100644 --- a/client/rpc-api/src/offchain/mod.rs +++ b/client/rpc-api/src/offchain/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/policy.rs b/client/rpc-api/src/policy.rs index 141dcfbc415f8..5d56c62bfece3 100644 --- a/client/rpc-api/src/policy.rs +++ b/client/rpc-api/src/policy.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/state/error.rs b/client/rpc-api/src/state/error.rs index 1c22788062c7b..4f2a2c854ae00 100644 --- a/client/rpc-api/src/state/error.rs +++ b/client/rpc-api/src/state/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/state/helpers.rs b/client/rpc-api/src/state/helpers.rs index 0d176ea67f35b..cb7bd380afa51 100644 --- a/client/rpc-api/src/state/helpers.rs +++ b/client/rpc-api/src/state/helpers.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 874fc862a39d2..aae2dcb5ae7d3 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/system/error.rs b/client/rpc-api/src/system/error.rs index 4897aa485cbe4..a0dfd863ce3aa 100644 --- a/client/rpc-api/src/system/error.rs +++ b/client/rpc-api/src/system/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs index c5dddedef9564..b2b793a8ee400 100644 --- a/client/rpc-api/src/system/helpers.rs +++ b/client/rpc-api/src/system/helpers.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/system/mod.rs b/client/rpc-api/src/system/mod.rs index fbeec23ea5085..2e8a7aa12633b 100644 --- a/client/rpc-api/src/system/mod.rs +++ b/client/rpc-api/src/system/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -79,9 +79,11 @@ pub trait SystemApi { /// Returns current state of the network. /// - /// **Warning**: This API is not stable. - // TODO: make this stable and move structs https://github.com/paritytech/substrate/issues/1890 - #[rpc(name = "system_networkState", returns = "jsonrpc_core::Value")] + /// **Warning**: This API is not stable. Please do not programmatically interpret its output, + /// as its format might change at any time. + // TODO: the future of this call is uncertain: https://github.com/paritytech/substrate/issues/1890 + // https://github.com/paritytech/substrate/issues/5541 + #[rpc(name = "system_unstable_networkState", returns = "jsonrpc_core::Value")] fn system_network_state(&self) -> Compat>>; @@ -108,4 +110,18 @@ pub trait SystemApi { /// known block. 
#[rpc(name = "system_syncState", returns = "SyncState")] fn system_sync_state(&self) -> Receiver>; + + /// Adds the supplied directives to the current log filter + /// + /// The syntax is identical to the CLI `=`: + /// + /// `sync=debug,state=trace` + #[rpc(name = "system_addLogFilter", returns = "()")] + fn system_add_log_filter(&self, directives: String) + -> Result<(), jsonrpc_core::Error>; + + /// Resets the log filter to Substrate defaults + #[rpc(name = "system_resetLogFilter", returns = "()")] + fn system_reset_log_filter(&self) + -> Result<(), jsonrpc_core::Error>; } diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index 4fdf0298a530b..95c3e4194cd51 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc-server" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.1.6" -jsonrpc-core = "15.0.0" -pubsub = { package = "jsonrpc-pubsub", version = "15.0.0" } +jsonrpc-core = "15.1.0" +pubsub = { package = "jsonrpc-pubsub", version = "15.1.0" } log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} serde = "1.0.101" serde_json = "1.0.41" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } [target.'cfg(not(target_os = "unknown"))'.dependencies] -http = { package = "jsonrpc-http-server", version = "15.0.0" } -ipc = { package = "jsonrpc-ipc-server", version = "15.0.0" } -ws = { package = "jsonrpc-ws-server", version = "15.0.0" } +http = { package = "jsonrpc-http-server", version = "15.1.0" } +ipc = { package = "jsonrpc-ipc-server", version = "15.1.0" } +ws = { package = "jsonrpc-ws-server", version = "15.1.0" } diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 1f99e8bb0d242..26d3cb1b7816a 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs index 74139714c8cb7..2cbc61716c317 100644 --- a/client/rpc-servers/src/middleware.rs +++ b/client/rpc-servers/src/middleware.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -32,36 +32,41 @@ use futures::{future::Either, Future}; /// Metrics for RPC middleware #[derive(Debug, Clone)] pub struct RpcMetrics { - rpc_calls: CounterVec, + rpc_calls: Option>, } impl RpcMetrics { /// Create an instance of metrics pub fn new(metrics_registry: Option<&Registry>) -> Result { - metrics_registry.and_then(|r| { - Some(RpcMetrics { - rpc_calls: register(CounterVec::new( - Opts::new( - "rpc_calls_total", - "Number of rpc calls received", - ), - &["protocol"] - ).ok()?, r).ok()?, - }) - }).ok_or(PrometheusError::Msg("Cannot register metric".to_string())) + Ok(Self { + rpc_calls: metrics_registry.map(|r| + register( + CounterVec::new( + Opts::new( + "rpc_calls_total", + "Number of rpc calls received", + ), + &["protocol"] + )?, + r, + ) + ).transpose()?, + }) } } /// Middleware for RPC calls pub struct RpcMiddleware { - metrics: Option, + metrics: RpcMetrics, transport_label: String, } impl RpcMiddleware { - /// Create an instance of middleware with provided metrics - /// transport_label is used as a label for Prometheus collector - pub fn new(metrics: Option, transport_label: &str) -> Self { + /// Create an instance of middleware. + /// + /// - `metrics`: Will be used to report statistics. + /// - `transport_label`: The label that is used when reporting the statistics. + pub fn new(metrics: RpcMetrics, transport_label: &str) -> Self { RpcMiddleware { metrics, transport_label: String::from(transport_label), @@ -78,8 +83,8 @@ impl RequestMiddleware for RpcMiddleware { F: Fn(Request, M) -> X + Send + Sync, X: Future, Error = ()> + Send + 'static, { - if let Some(ref metrics) = self.metrics { - metrics.rpc_calls.with_label_values(&[self.transport_label.as_str()]).inc(); + if let Some(ref rpc_calls) = self.metrics.rpc_calls { + rpc_calls.with_label_values(&[self.transport_label.as_str()]).inc(); } Either::B(next(request, meta)) diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 021c795fe5b94..203bb0e525d8b 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,43 +13,45 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-rpc-api = { version = "0.8.0", path = "../rpc-api" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.4" } +sc-rpc-api = { version = "0.9.0", path = "../rpc-api" } +sc-client-api = { version = "3.0.0", path = "../api" } +sp-api = { version = "3.0.0", path = "../../primitives/api" } +codec = { package = "parity-scale-codec", version = "2.0.0" } futures = { version = "0.3.1", features = ["compat"] } -jsonrpc-pubsub = "15.0.0" +jsonrpc-pubsub = "15.1.0" log = "0.4.8" -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } -rpc = { package = "jsonrpc-core", version = "15.0.0" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +rpc = { package = "jsonrpc-core", version = "15.1.0" } +sp-version = { version = "3.0.0", path = "../../primitives/version" } serde_json = "1.0.41" -sp-session = 
{ version = "2.0.0", path = "../../primitives/session" } -sp-offchain = { version = "2.0.0", path = "../../primitives/offchain" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sp-rpc = { version = "2.0.0", path = "../../primitives/rpc" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sp-chain-spec = { version = "2.0.0", path = "../../primitives/chain-spec" } -sc-executor = { version = "0.8.0", path = "../executor" } -sc-block-builder = { version = "0.8.0", path = "../../client/block-builder" } -sc-keystore = { version = "2.0.0", path = "../keystore" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-session = { version = "3.0.0", path = "../../primitives/session" } +sp-offchain = { version = "3.0.0", path = "../../primitives/offchain" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "3.0.0", path = "../../primitives/utils" } +sp-rpc = { version = "3.0.0", path = "../../primitives/rpc" } +sp-keystore = { version = "0.9.0", path = "../../primitives/keystore" } +sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } +sp-chain-spec = { version = "3.0.0", path = "../../primitives/chain-spec" } +sc-executor = { version = "0.9.0", path = "../executor" } +sc-block-builder = { version = "0.9.0", path = "../block-builder" } +sc-keystore = { version = "3.0.0", path = "../keystore" } +sp-transaction-pool = { version = "3.0.0", path = "../../primitives/transaction-pool" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sc-tracing = { version = "3.0.0", path = "../tracing" } hash-db = { version = "0.15.2", default-features = false } -parking_lot = "0.10.0" +parking_lot = "0.11.1" lazy_static = { version = "1.4.0", optional = true } [dev-dependencies] assert_matches = "1.3.0" futures01 = { package = "futures", version = "0.1.29" } lazy_static = "1.4.0" -sc-network = { version = "0.8.0", path = "../network" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } +sc-network = { version = "0.9.0", path = "../network" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } tokio = "0.1.22" -sc-transaction-pool = { version = "2.0.0", path = "../transaction-pool" } +sc-transaction-pool = { version = "3.0.0", path = "../transaction-pool" } +sc-cli = { version = "0.9.0", path = "../cli" } [features] test-helpers = ["lazy_static"] diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 1db90e209d0d6..7cd980544503b 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -215,7 +215,7 @@ impl AuthorApi, BlockHash

> for Author Ok(watcher) => { subscriptions.add(subscriber, move |sink| { sink - .sink_map_err(|_| unimplemented!()) + .sink_map_err(|e| log::debug!("Subscription sink failed: {:?}", e)) .send_all(Compat::new(watcher)) .map(|_| ()) }); diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index dc553e60dbfbe..0e7cb5539501d 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -65,6 +65,7 @@ impl Default for TestSetup { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), + true.into(), None, spawner, client.clone(), diff --git a/client/rpc/src/chain/chain_full.rs b/client/rpc/src/chain/chain_full.rs index 816dbba866417..9687b13d50fc7 100644 --- a/client/rpc/src/chain/chain_full.rs +++ b/client/rpc/src/chain/chain_full.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Blockchain API backend for full nodes. diff --git a/client/rpc/src/chain/chain_light.rs b/client/rpc/src/chain/chain_light.rs index 8a4afbed71c16..41d4d02e33c9d 100644 --- a/client/rpc/src/chain/chain_light.rs +++ b/client/rpc/src/chain/chain_light.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
+// along with this program. If not, see . //! Blockchain API backend for light nodes. diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index cb67d9ba23166..d3a28d534335f 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index b36fc4eab1d86..80b990a9fbf03 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 434859a39c2f4..7b3af8cb2f328 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc/src/offchain/mod.rs b/client/rpc/src/offchain/mod.rs index f8d2bb6a50f9c..dbb48a9e51934 100644 --- a/client/rpc/src/offchain/mod.rs +++ b/client/rpc/src/offchain/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc/src/offchain/tests.rs b/client/rpc/src/offchain/tests.rs index f65971a7ffe8a..b8054d816325f 100644 --- a/client/rpc/src/offchain/tests.rs +++ b/client/rpc/src/offchain/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 8573b3cf82551..52a4ed1d753b5 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index fda73cea27110..8d93d445b08cb 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! State API backend for full nodes. @@ -541,7 +543,7 @@ impl ChildStateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), + None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_keys( &BlockId::Hash(block), @@ -563,7 +565,7 @@ impl ChildStateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), + None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage( &BlockId::Hash(block), @@ -585,7 +587,7 @@ impl ChildStateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), + None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_hash( &BlockId::Hash(block), diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 8f4dce08b3fb6..c8c921345877c 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! State API backend for light nodes. 
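The `ChildStateBackend` hunks above replace the stringly-typed `Err("Invalid child storage key".into())` with a dedicated `sp_blockchain::Error::InvalidChildStorageKey` variant. A minimal sketch of the same pattern with hypothetical names (not the actual sp_blockchain definitions), using the `thiserror` derive this PR standardizes on:

#[derive(Debug, thiserror::Error)]
enum Error {
    #[error("Invalid child storage key")]
    InvalidChildStorageKey,
}

// A typed variant lets callers match on the failure instead of parsing a message string.
fn default_child_storage_key(prefixed_key: &[u8]) -> Result<&[u8], Error> {
    prefixed_key
        .strip_prefix(b":child_storage:default:")
        .ok_or(Error::InvalidChildStorageKey)
}

fn main() {
    assert!(default_child_storage_key(b":child_storage:default:my_child").is_ok());
    assert!(default_child_storage_key(b"unprefixed").is_err());
}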
@@ -588,7 +590,7 @@ fn runtime_version>( ) .then(|version| ready(version.and_then(|version| Decode::decode(&mut &version.0[..]) - .map_err(|e| client_err(ClientError::VersionInvalid(e.what().into()))) + .map_err(|e| client_err(ClientError::VersionInvalid(e.to_string()))) ))) } diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index d145ac5e5510b..87b0fae1d6b3c 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc/src/system/mod.rs b/client/rpc/src/system/mod.rs index 17fb6b77a5710..240de6c62876e 100644 --- a/client/rpc/src/system/mod.rs +++ b/client/rpc/src/system/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -24,6 +24,7 @@ mod tests; use futures::{future::BoxFuture, FutureExt, TryFutureExt}; use futures::{channel::oneshot, compat::Compat}; use sc_rpc_api::{DenyUnsafe, Receiver}; +use sc_tracing::logging; use sp_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::{self, Header as HeaderT}; @@ -197,4 +198,15 @@ impl SystemApi::Number> for Sy let _ = self.send_back.unbounded_send(Request::SyncState(tx)); Receiver(Compat::new(rx)) } + + fn system_add_log_filter(&self, directives: String) -> std::result::Result<(), rpc::Error> { + self.deny_unsafe.check_if_safe()?; + logging::add_directives(&directives); + logging::reload_filter().map_err(|_e| rpc::Error::internal_error()) + } + + fn system_reset_log_filter(&self)-> std::result::Result<(), rpc::Error> { + self.deny_unsafe.check_if_safe()?; + logging::reset_log_filter().map_err(|_e| rpc::Error::internal_error()) + } } diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 61f1940dc2010..c196403501035 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
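The `system/mod.rs` hunk above adds `system_add_log_filter` and `system_reset_log_filter`, both rejected unless unsafe RPCs are allowed (`deny_unsafe.check_if_safe()?`). A hedged sketch of a client-side call; the wire-level method name `system_addLogFilter` and the directive syntax follow the usual conventions and should be verified against `sc-rpc-api` rather than taken from this diff:

fn main() {
    // JSON-RPC 2.0 request body for adding a log directive at runtime.
    // Directives use the familiar `target=level` filter syntax.
    let request = serde_json::json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "system_addLogFilter",
        "params": ["afg=trace,sync=debug"]
    });
    // A node only accepts this when unsafe RPC methods are exposed
    // (for example a dev node started with `--rpc-methods=Unsafe`).
    println!("{}", request);
}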
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -24,7 +24,10 @@ use substrate_test_runtime_client::runtime::Block; use assert_matches::assert_matches; use futures::prelude::*; use sp_utils::mpsc::tracing_unbounded; -use std::thread; +use std::{ + process::{Stdio, Command}, env, io::{BufReader, BufRead, Write}, + sync::{Arc, Mutex}, thread, time::Duration +}; struct Status { pub peers: usize, @@ -333,3 +336,81 @@ fn system_network_remove_reserved() { assert_eq!(runtime.block_on(good_fut), Ok(())); assert!(runtime.block_on(bad_fut).is_err()); } + +#[test] +fn test_add_reset_log_filter() { + const EXPECTED_BEFORE_ADD: &'static str = "EXPECTED_BEFORE_ADD"; + const EXPECTED_AFTER_ADD: &'static str = "EXPECTED_AFTER_ADD"; + + // Enter log generation / filter reload + if std::env::var("TEST_LOG_FILTER").is_ok() { + sc_tracing::logging::LoggerBuilder::new("test_before_add=debug").init().unwrap(); + for line in std::io::stdin().lock().lines() { + let line = line.expect("Failed to read bytes"); + if line.contains("add_reload") { + api(None).system_add_log_filter("test_after_add".into()).expect("`system_add_log_filter` failed"); + } else if line.contains("reset") { + api(None).system_reset_log_filter().expect("`system_reset_log_filter` failed"); + } else if line.contains("exit") { + return; + } + log::debug!(target: "test_before_add", "{}", EXPECTED_BEFORE_ADD); + log::debug!(target: "test_after_add", "{}", EXPECTED_AFTER_ADD); + } + } + + // Call this test again to enter the log generation / filter reload block + let test_executable = env::current_exe().expect("Unable to get current executable!"); + let mut child_process = Command::new(test_executable) + .env("TEST_LOG_FILTER", "1") + .args(&["--nocapture", "test_add_reset_log_filter"]) + .stdin(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .unwrap(); + + let child_stderr = child_process.stderr.take().expect("Could not get child stderr"); + let mut child_out = BufReader::new(child_stderr); + let mut child_in = child_process.stdin.take().expect("Could not get child stdin"); + + let child_out_str = Arc::new(Mutex::new(String::new())); + let shared = child_out_str.clone(); + + let _handle = thread::spawn(move || { + let mut line = String::new(); + while let Ok(_) = child_out.read_line(&mut line) { + shared.lock().unwrap().push_str(&line); + line.clear(); + } + }); + + // Initiate logs loop in child process + child_in.write(b"\n").unwrap(); + thread::sleep(Duration::from_millis(100)); + let test1_str = child_out_str.lock().unwrap().clone(); + // Assert that only the first target is present + assert!(test1_str.contains(EXPECTED_BEFORE_ADD)); + assert!(!test1_str.contains(EXPECTED_AFTER_ADD)); + child_out_str.lock().unwrap().clear(); + + // Initiate add directive & reload in child process + child_in.write(b"add_reload\n").unwrap(); + thread::sleep(Duration::from_millis(100)); + let test2_str = child_out_str.lock().unwrap().clone(); + // Assert that both targets are now present + assert!(test2_str.contains(EXPECTED_BEFORE_ADD)); + assert!(test2_str.contains(EXPECTED_AFTER_ADD)); + child_out_str.lock().unwrap().clear(); + + // Initiate logs filter reset in child process + child_in.write(b"reset\n").unwrap(); + thread::sleep(Duration::from_millis(100)); + let test3_str = child_out_str.lock().unwrap().clone(); + // Assert that only the first target is present as it was initially + assert!(test3_str.contains(EXPECTED_BEFORE_ADD)); + 
assert!(!test3_str.contains(EXPECTED_AFTER_ADD)); + + // Return from child process + child_in.write(b"exit\n").unwrap(); + assert!(child_process.wait().expect("Error waiting for child process").success()); +} diff --git a/client/rpc/src/testing.rs b/client/rpc/src/testing.rs index 9530ff0020644..b69cc7d4b1940 100644 --- a/client/rpc/src/testing.rs +++ b/client/rpc/src/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 3569b2e7e5853..0a9be763b240b 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-service" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -24,71 +24,71 @@ wasmtime = [ test-helpers = [] [dependencies] -derive_more = "0.99.2" +thiserror = "1.0.21" futures01 = { package = "futures", version = "0.1.29" } futures = { version = "0.3.4", features = ["compat"] } -jsonrpc-pubsub = "15.0" -jsonrpc-core = "15.0" +jsonrpc-pubsub = "15.1" +jsonrpc-core = "15.1" rand = "0.7.3" -parking_lot = "0.10.0" +parking_lot = "0.11.1" lazy_static = "1.4.0" -log = "0.4.8" -slog = { version = "2.5.2", features = ["nested-values"] } +log = "0.4.11" futures-timer = "3.0.1" wasm-timer = "0.2" exit-future = "0.2.0" -pin-project = "0.4.8" +pin-project = "1.0.4" hash-db = "0.15.2" serde = "1.0.101" serde_json = "1.0.41" -sc-keystore = { version = "2.0.0", path = "../keystore" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-trie = { version = "2.0.0", path = "../../primitives/trie" } -sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } -sp-session = { version = "2.0.0", path = "../../primitives/session" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } -sc-network = { version = "0.8.0", path = "../network" } -sc-chain-spec = { version = "2.0.0", path = "../chain-spec" } -sc-light = { version = "2.0.0", path = "../light" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sc-client-db = { version = "0.8.0", default-features = false, path = "../db" } -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor = { version = "0.8.0", path = "../executor" } -sc-transaction-pool = { version = "2.0.0", path = "../transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sc-rpc-server = { version = "2.0.0", path = "../rpc-servers" } -sc-rpc = 
{ version = "2.0.0", path = "../rpc" } -sc-block-builder = { version = "0.8.0", path = "../block-builder" } -sp-block-builder = { version = "2.0.0", path = "../../primitives/block-builder" } -sc-informant = { version = "0.8.0", path = "../informant" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sc-offchain = { version = "2.0.0", path = "../offchain" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} -sc-tracing = { version = "2.0.0", path = "../tracing" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -tracing = "0.1.19" +sc-keystore = { version = "3.0.0", path = "../keystore" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-trie = { version = "3.0.0", path = "../../primitives/trie" } +sp-externalities = { version = "0.9.0", path = "../../primitives/externalities" } +sp-utils = { version = "3.0.0", path = "../../primitives/utils" } +sp-version = { version = "3.0.0", path = "../../primitives/version" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-keystore = { version = "0.9.0", path = "../../primitives/keystore" } +sp-session = { version = "3.0.0", path = "../../primitives/session" } +sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } +sp-application-crypto = { version = "3.0.0", path = "../../primitives/application-crypto" } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } +sp-inherents = { version = "3.0.0", path = "../../primitives/inherents" } +sc-network = { version = "0.9.0", path = "../network" } +sc-chain-spec = { version = "3.0.0", path = "../chain-spec" } +sc-light = { version = "3.0.0", path = "../light" } +sc-client-api = { version = "3.0.0", path = "../api" } +sp-api = { version = "3.0.0", path = "../../primitives/api" } +sc-client-db = { version = "0.9.0", default-features = false, path = "../db" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-executor = { version = "0.9.0", path = "../executor" } +sc-transaction-pool = { version = "3.0.0", path = "../transaction-pool" } +sp-transaction-pool = { version = "3.0.0", path = "../../primitives/transaction-pool" } +sc-rpc-server = { version = "3.0.0", path = "../rpc-servers" } +sc-rpc = { version = "3.0.0", path = "../rpc" } +sc-block-builder = { version = "0.9.0", path = "../block-builder" } +sp-block-builder = { version = "3.0.0", path = "../../primitives/block-builder" } +sc-informant = { version = "0.9.0", path = "../informant" } +sc-telemetry = { version = "3.0.0", path = "../telemetry" } +sc-offchain = { version = "3.0.0", path = "../offchain" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} +sc-tracing = { version = "3.0.0", path = "../tracing" } +sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } +tracing = "0.1.22" tracing-futures = { version = "0.2.4" } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] tempfile = "3.1.0" -directories = "2.0.2" +directories = "3.0.1" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = 
"../../test-utils/runtime/client" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime/" } -sp-consensus-babe = { version = "0.8.0", path = "../../primitives/consensus/babe" } -grandpa = { version = "0.8.0", package = "sc-finality-grandpa", path = "../finality-grandpa" } -grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } -tokio = { version = "0.2", default-features = false } +sp-consensus-babe = { version = "0.9.0", path = "../../primitives/consensus/babe" } +grandpa = { version = "0.9.0", package = "sc-finality-grandpa", path = "../finality-grandpa" } +grandpa-primitives = { version = "3.0.0", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } +tokio = { version = "0.2.25", default-features = false } async-std = { version = "1.6.5", default-features = false } +tracing-subscriber = "0.2.15" diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 2a4dda477ab75..882a6c4062650 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,8 +17,7 @@ // along with this program. If not, see . use crate::{ - error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm, - TelemetryConnectionSinks, RpcHandlers, NetworkStatusSinks, + error::Error, MallocSizeOfWasm, RpcHandlers, NetworkStatusSinks, start_rpc_servers, build_network_future, TransactionPoolAdapter, TaskManager, SpawnTaskHandle, metrics::MetricsService, client::{light, Client, ClientConfig}, @@ -41,17 +40,24 @@ use futures::{ }; use sc_keystore::LocalKeystore; use log::{info, warn}; -use sc_network::config::{Role, FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; +use sc_network::config::{Role, OnDemand}; use sc_network::NetworkService; +use sc_network::block_request_handler::{self, BlockRequestHandler}; +use sc_network::light_client_requests::{self, handler::LightClientRequestHandler}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ - Block as BlockT, SaturatedConversion, HashFor, Zero, BlockIdTo, + Block as BlockT, HashFor, Zero, BlockIdTo, }; use sp_api::{ProvideRuntimeApi, CallApiAt}; use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo}; use std::sync::Arc; use wasm_timer::SystemTime; -use sc_telemetry::{telemetry, SUBSTRATE_INFO}; +use sc_telemetry::{ + telemetry, + ConnectionMessage, + TelemetryConnectionNotifier, + SUBSTRATE_INFO, +}; use sp_transaction_pool::MaintainedTransactionPool; use prometheus_endpoint::Registry; use sc_client_db::{Backend, DatabaseSettings}; @@ -59,7 +65,7 @@ use sp_core::traits::{ CodeExecutor, SpawnNamed, }; -use sp_keystore::{CryptoStore, SyncCryptoStorePtr}; +use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::BuildStorage; use sc_client_api::{ BlockBackend, BlockchainEvents, @@ -205,10 +211,24 @@ pub type TLightClientWithBackend = Client< TRtApi, >; +trait AsCryptoStoreRef { + fn keystore_ref(&self) -> Arc; + fn sync_keystore_ref(&self) -> Arc; +} + +impl AsCryptoStoreRef for Arc where T: CryptoStore + SyncCryptoStore + 'static { + fn keystore_ref(&self) -> Arc { + self.clone() + } + fn sync_keystore_ref(&self) -> Arc { + self.clone() + } +} + /// 
Construct and hold different layers of Keystore wrappers pub struct KeystoreContainer { - keystore: Arc, - sync_keystore: SyncCryptoStorePtr, + remote: Option>, + local: Arc, } impl KeystoreContainer { @@ -221,22 +241,49 @@ impl KeystoreContainer { )?, KeystoreConfig::InMemory => LocalKeystore::in_memory(), }); - let sync_keystore = keystore.clone() as SyncCryptoStorePtr; - Ok(Self { - keystore, - sync_keystore, - }) + Ok(Self{remote: Default::default(), local: keystore}) + } + + /// Set the remote keystore. + /// Should be called right away at startup and not at runtime: + /// even though this overrides any previously set remote store, it + /// does not reset any references previously handed out - they will + /// stick around. + pub fn set_remote_keystore(&mut self, remote: Arc) + where T: CryptoStore + SyncCryptoStore + 'static + { + self.remote = Some(Box::new(remote)) } /// Returns an adapter to the asynchronous keystore that implements `CryptoStore` pub fn keystore(&self) -> Arc { - self.keystore.clone() + if let Some(c) = self.remote.as_ref() { + c.keystore_ref() + } else { + self.local.clone() + } } /// Returns the synchronous keystore wrapper pub fn sync_keystore(&self) -> SyncCryptoStorePtr { - self.sync_keystore.clone() + if let Some(c) = self.remote.as_ref() { + c.sync_keystore_ref() + } else { + self.local.clone() as SyncCryptoStorePtr + } + } + + /// Returns the local keystore if available + /// + /// The function will return None if the available keystore is not a local keystore. + /// + /// # Note + /// + /// Using the [`LocalKeystore`] will result in losing the ability to use any other keystore implementation, like + /// a remote keystore for example. Only use this if you are certain that you require it! + pub fn local_keystore(&self) -> Option> { + Some(self.local.clone()) + } } @@ -261,7 +308,7 @@ pub fn new_full_parts( let task_manager = { let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - TaskManager::new(config.task_executor.clone(), registry)? + TaskManager::new(config.task_executor.clone(), registry, config.telemetry_span.clone())? }; let executor = NativeExecutor::::new( @@ -284,8 +331,10 @@ pub fn new_full_parts( state_cache_size: config.state_cache_size, state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), - pruning: config.pruning.clone(), + state_pruning: config.state_pruning.clone(), source: config.database.clone(), + keep_blocks: config.keep_blocks.clone(), + transaction_storage: config.transaction_storage.clone(), }; let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( @@ -320,7 +369,7 @@ pub fn new_full_parts( /// Create the initial parts of a light node. pub fn new_light_parts( - config: &Configuration + config: &Configuration, ) -> Result, Error> where TBl: BlockT, TExecDisp: NativeExecutionDispatch + 'static, { let keystore_container = KeystoreContainer::new(&config.keystore)?; let task_manager = { let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - TaskManager::new(config.task_executor.clone(), registry)? + TaskManager::new(config.task_executor.clone(), registry, config.telemetry_span.clone())?
}; let executor = NativeExecutor::::new( @@ -342,8 +391,10 @@ pub fn new_light_parts( state_cache_size: config.state_cache_size, state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), - pruning: config.pruning.clone(), + state_pruning: config.state_pruning.clone(), source: config.database.clone(), + keep_blocks: config.keep_blocks.clone(), + transaction_storage: config.transaction_storage.clone(), }; sc_client_db::light::LightStorage::new(db_settings)? }; @@ -440,8 +491,6 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub network_status_sinks: NetworkStatusSinks, /// A Sender for RPC requests. pub system_rpc_tx: TracingUnboundedSender>, - /// Shared Telemetry connection sinks, - pub telemetry_connection_sinks: TelemetryConnectionSinks, } /// Build a shared offchain workers instance. @@ -488,7 +537,7 @@ pub fn build_offchain_workers( /// Spawn the tasks that are required to run a node. pub fn spawn_tasks( params: SpawnTasksParams, -) -> Result +) -> Result<(RpcHandlers, Option), Error> where TCl: ProvideRuntimeApi + HeaderMetadata + Chain + BlockBackend + BlockIdTo + ProofProvider + @@ -521,7 +570,6 @@ pub fn spawn_tasks( network, network_status_sinks, system_rpc_tx, - telemetry_connection_sinks, } = params; let chain_info = client.usage_info().chain; @@ -532,14 +580,14 @@ pub fn spawn_tasks( config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), )?; - info!("📦 Highest known block at #{}", chain_info.best_number); - telemetry!( - SUBSTRATE_INFO; - "node.start"; - "height" => chain_info.best_number.saturated_into::(), - "best" => ?chain_info.best_hash + let telemetry_connection_notifier = init_telemetry( + &mut config, + network.clone(), + client.clone(), ); + info!("📦 Highest known block at #{}", chain_info.best_number); + let spawn_handle = task_manager.spawn_handle(); // Inform the tx pool about imported and finalized blocks. 
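The `KeystoreContainer` rework above holds a local `LocalKeystore` plus an optional remote store and hands out whichever is set, relying on a small adapter trait so one `Arc<T>` can be returned as either trait object. A self-contained sketch of that pattern with illustrative trait names (not the sp_keystore types):

use std::sync::Arc;

trait AsyncStore {}
trait SyncStore {}

// Adapter: anything implementing both traits can be handed out as either trait object.
trait AsStoreRef {
    fn async_ref(&self) -> Arc<dyn AsyncStore>;
    fn sync_ref(&self) -> Arc<dyn SyncStore>;
}

impl<T> AsStoreRef for Arc<T>
where
    T: AsyncStore + SyncStore + 'static,
{
    fn async_ref(&self) -> Arc<dyn AsyncStore> { self.clone() }
    fn sync_ref(&self) -> Arc<dyn SyncStore> { self.clone() }
}

struct LocalStore;
impl AsyncStore for LocalStore {}
impl SyncStore for LocalStore {}

struct Container {
    remote: Option<Box<dyn AsStoreRef>>,
    local: Arc<LocalStore>,
}

impl Container {
    // Prefer the remote store when one was registered, otherwise fall back to the local one.
    fn async_store(&self) -> Arc<dyn AsyncStore> {
        match self.remote.as_ref() {
            Some(remote) => remote.async_ref(),
            None => self.local.clone(),
        }
    }
}

fn main() {
    let container = Container { remote: None, local: Arc::new(LocalStore) };
    let _store = container.async_store();
}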
@@ -588,32 +636,14 @@ pub fn spawn_tasks( on_demand.clone(), remote_blockchain.clone(), &*rpc_extensions_builder, backend.offchain_storage(), system_rpc_tx.clone() ); - let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry()).ok(); - let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.as_ref())?; + let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry())?; + let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.clone())?; // This is used internally, so don't restrict access to unsafe RPC let rpc_handlers = RpcHandlers(Arc::new(gen_handler( sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new(rpc_metrics.as_ref().cloned(), "inbrowser") + sc_rpc_server::RpcMiddleware::new(rpc_metrics, "inbrowser") ).into())); - // Telemetry - let telemetry = config.telemetry_endpoints.clone().and_then(|endpoints| { - if endpoints.is_empty() { - // we don't want the telemetry to be initialized if telemetry_endpoints == Some([]) - return None; - } - - let genesis_hash = match client.block_hash(Zero::zero()) { - Ok(Some(hash)) => hash, - _ => Default::default(), - }; - - Some(build_telemetry( - &mut config, endpoints, telemetry_connection_sinks.clone(), network.clone(), - task_manager.spawn_handle(), genesis_hash, - )) - }); - // Spawn informant task spawn_handle.spawn("informant", sc_informant::build( client.clone(), @@ -622,21 +652,22 @@ pub fn spawn_tasks( config.informant_output_format, )); - task_manager.keep_alive((telemetry, config.base_path, rpc, rpc_handlers.clone())); + task_manager.keep_alive((config.base_path, rpc, rpc_handlers.clone())); - Ok(rpc_handlers) + Ok((rpc_handlers, telemetry_connection_notifier)) } async fn transaction_notifications( transaction_pool: Arc, - network: Arc::Hash>> + network: Arc::Hash>>, ) where TBl: BlockT, TExPool: MaintainedTransactionPool::Hash>, { // transaction notifications - transaction_pool.import_notification_stream() + transaction_pool + .import_notification_stream() .for_each(move |hash| { network.propagate_transaction(hash); let status = transaction_pool.status(); @@ -649,55 +680,35 @@ async fn transaction_notifications( .await; } -fn build_telemetry( +fn init_telemetry>( config: &mut Configuration, - endpoints: sc_telemetry::TelemetryEndpoints, - telemetry_connection_sinks: TelemetryConnectionSinks, network: Arc::Hash>>, - spawn_handle: SpawnTaskHandle, - genesis_hash: ::Hash, -) -> sc_telemetry::Telemetry { - let is_authority = config.role.is_authority(); - let network_id = network.local_peer_id().to_base58(); - let name = config.network.node_name.clone(); - let impl_name = config.impl_name.clone(); - let impl_version = config.impl_version.clone(); - let chain_name = config.chain_spec.name().to_owned(); - let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { - endpoints, - wasm_external_transport: config.telemetry_external_transport.take(), - }); - let startup_time = SystemTime::UNIX_EPOCH.elapsed() - .map(|dur| dur.as_millis()) - .unwrap_or(0); - - spawn_handle.spawn( - "telemetry-worker", - telemetry.clone() - .for_each(move |event| { - // Safe-guard in case we add more events in the future. 
- let sc_telemetry::TelemetryEvent::Connected = event; - - telemetry!(SUBSTRATE_INFO; "system.connected"; - "name" => name.clone(), - "implementation" => impl_name.clone(), - "version" => impl_version.clone(), - "config" => "", - "chain" => chain_name.clone(), - "genesis_hash" => ?genesis_hash, - "authority" => is_authority, - "startup_time" => startup_time, - "network_id" => network_id.clone() - ); - - telemetry_connection_sinks.0.lock().retain(|sink| { - sink.unbounded_send(()).is_ok() - }); - ready(()) - }) - ); + client: Arc, +) -> Option { + let telemetry_span = config.telemetry_span.clone()?; + let endpoints = config.telemetry_endpoints.clone()?; + let genesis_hash = client.block_hash(Zero::zero()).ok().flatten().unwrap_or_default(); + let connection_message = ConnectionMessage { + name: config.network.node_name.to_owned(), + implementation: config.impl_name.to_owned(), + version: config.impl_version.to_owned(), + config: String::new(), + chain: config.chain_spec.name().to_owned(), + genesis_hash: format!("{:?}", genesis_hash), + authority: config.role.is_authority(), + startup_time: SystemTime::UNIX_EPOCH.elapsed() + .map(|dur| dur.as_millis()) + .unwrap_or(0).to_string(), + network_id: network.local_peer_id().to_base58(), + }; - telemetry + config.telemetry_handle + .as_mut() + .map(|handle| handle.start_telemetry( + telemetry_span, + endpoints, + connection_message, + )) } fn gen_handler( @@ -815,10 +826,6 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { pub block_announce_validator_builder: Option) -> Box + Send> + Send >>, - /// An optional finality proof request builder. - pub finality_proof_request_builder: Option>, - /// An optional, shared finality proof request provider. - pub finality_proof_provider: Option>>, } /// Build the network service, the network status sinks and an RPC sender. @@ -843,7 +850,7 @@ pub fn build_network( { let BuildNetworkParams { config, client, transaction_pool, spawn_handle, import_queue, on_demand, - block_announce_validator_builder, finality_proof_request_builder, finality_proof_provider, + block_announce_validator_builder, } = params; let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { @@ -852,18 +859,7 @@ pub fn build_network( client: client.clone(), }); - let protocol_id = { - let protocol_id_full = match config.chain_spec.protocol_id() { - Some(pid) => pid, - None => { - warn!("Using default protocol ID {:?} because none is configured in the \ - chain specs", DEFAULT_PROTOCOL_ID - ); - DEFAULT_PROTOCOL_ID - } - }; - sc_network::config::ProtocolId::from(protocol_id_full) - }; + let protocol_id = config.protocol_id(); let block_announce_validator = if let Some(f) = block_announce_validator_builder { f(client.clone()) @@ -871,6 +867,36 @@ pub fn build_network( Box::new(DefaultBlockAnnounceValidator) }; + let block_request_protocol_config = { + if matches!(config.role, Role::Light) { + // Allow outgoing requests but deny incoming requests. + block_request_handler::generate_protocol_config(&protocol_id) + } else { + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = BlockRequestHandler::new( + &protocol_id, + client.clone(), + ); + spawn_handle.spawn("block_request_handler", handler.run()); + protocol_config + } + }; + + let light_client_request_protocol_config = { + if matches!(config.role, Role::Light) { + // Allow outgoing requests but deny incoming requests. 
+ light_client_requests::generate_protocol_config(&protocol_id) + } else { + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = LightClientRequestHandler::new( + &protocol_id, + client.clone(), + ); + spawn_handle.spawn("light_client_request_handler", handler.run()); + protocol_config + } + }; + let network_params = sc_network::config::Params { role: config.role.clone(), executor: { @@ -881,14 +907,14 @@ pub fn build_network( }, network_config: config.network.clone(), chain: client.clone(), - finality_proof_provider, - finality_proof_request_builder, on_demand: on_demand, transaction_pool: transaction_pool_adapter as _, import_queue: Box::new(import_queue), protocol_id, block_announce_validator, - metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()) + metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), + block_request_protocol_config, + light_client_request_protocol_config, }; let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); diff --git a/client/service/src/chain_ops/check_block.rs b/client/service/src/chain_ops/check_block.rs index 34baeb55445a8..94f6d25c9eb8f 100644 --- a/client/service/src/chain_ops/check_block.rs +++ b/client/service/src/chain_ops/check_block.rs @@ -1,18 +1,20 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::error::Error; use futures::{future, prelude::*}; diff --git a/client/service/src/chain_ops/export_blocks.rs b/client/service/src/chain_ops/export_blocks.rs index 3d2dbcbb9d00f..1d9325d1d7452 100644 --- a/client/service/src/chain_ops/export_blocks.rs +++ b/client/service/src/chain_ops/export_blocks.rs @@ -1,18 +1,20 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::error::Error; use log::info; diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs index 3fe44dbdb142d..71822cf6275f8 100644 --- a/client/service/src/chain_ops/export_raw_state.rs +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -1,18 +1,20 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::error::Error; use sp_runtime::traits::Block as BlockT; diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs index 74a33c6557c9a..3f918e05120e9 100644 --- a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/service/src/chain_ops/mod.rs b/client/service/src/chain_ops/mod.rs index af6e6f632fc06..c213e745a5d6b 100644 --- a/client/service/src/chain_ops/mod.rs +++ b/client/service/src/chain_ops/mod.rs @@ -1,18 +1,20 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. 
If not, see . +// along with this program. If not, see . //! Chain utilities. diff --git a/client/service/src/chain_ops/revert_chain.rs b/client/service/src/chain_ops/revert_chain.rs index eaee2c03f9b31..e3301eb2627e2 100644 --- a/client/service/src/chain_ops/revert_chain.rs +++ b/client/service/src/chain_ops/revert_chain.rs @@ -1,18 +1,20 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::error::Error; use log::info; diff --git a/client/service/src/client/block_rules.rs b/client/service/src/client/block_rules.rs index be84614c2a590..1af06666339cc 100644 --- a/client/service/src/client/block_rules.rs +++ b/client/service/src/client/block_rules.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 164976ecfe871..cc196f67a37aa 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -29,7 +29,6 @@ use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use sp_externalities::Extensions; use sp_core::{ NativeOrEncoded, NeverNativeValue, traits::{CodeExecutor, SpawnNamed, RuntimeCode}, - offchain::storage::OffchainOverlayedChanges, }; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; use sc_client_api::{backend, call_executor::CallExecutor}; @@ -127,23 +126,19 @@ where extensions: Option, ) -> sp_blockchain::Result> { let mut changes = OverlayedChanges::default(); - let mut offchain_changes = if self.client_config.offchain_indexing_api { - OffchainOverlayedChanges::enabled() - } else { - OffchainOverlayedChanges::disabled() - }; let changes_trie = backend::changes_tries_state_at_block( id, self.backend.changes_trie_storage() )?; let state = self.backend.state_at(*id)?; let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = self.check_override(state_runtime_code.runtime_code()?, id)?; + let runtime_code = state_runtime_code.runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, id)?; let return_data = StateMachine::new( &state, changes_trie, &mut changes, - &mut offchain_changes, &self.executor, method, call_data, @@ -174,7 +169,6 @@ where method: &str, call_data: &[u8], changes: &RefCell, - offchain_changes: &RefCell, storage_transaction_cache: Option<&RefCell< StorageTransactionCache >>, @@ -199,7 +193,6 @@ where let mut state = self.backend.state_at(*at)?; let changes = &mut *changes.borrow_mut(); - let offchain_changes = &mut *offchain_changes.borrow_mut(); match recorder { Some(recorder) => { @@ -211,7 +204,9 @@ where let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_state); // It is important to extract the runtime code here before we create the proof // recorder. 
- let runtime_code = self.check_override(state_runtime_code.runtime_code()?, at)?; + let runtime_code = state_runtime_code.runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, at)?; let backend = sp_state_machine::ProvingBackend::new_with_recorder( trie_state, @@ -222,7 +217,6 @@ where &backend, changes_trie_state, changes, - offchain_changes, &self.executor, method, call_data, @@ -236,13 +230,14 @@ where }, None => { let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = self.check_override(state_runtime_code.runtime_code()?, at)?; + let runtime_code = state_runtime_code.runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, at)?; let mut state_machine = StateMachine::new( &state, changes_trie_state, changes, - offchain_changes, &self.executor, method, call_data, @@ -257,7 +252,6 @@ where fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let changes_trie_state = backend::changes_tries_state_at_block( id, self.backend.changes_trie_storage(), @@ -266,14 +260,15 @@ where let mut cache = StorageTransactionCache::::default(); let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, &state, changes_trie_state, None, ); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - self.executor.runtime_version(&mut ext, &state_runtime_code.runtime_code()?) + let runtime_code = state_runtime_code.runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + self.executor.runtime_version(&mut ext, &runtime_code) .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) } @@ -284,6 +279,9 @@ where method: &str, call_data: &[u8] ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_state); + let runtime_code = state_runtime_code.runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _, _>( trie_state, overlay, @@ -291,7 +289,7 @@ where self.spawn_handle.clone(), method, call_data, - &sp_state_machine::backend::BackendRuntimeCode::new(trie_state).runtime_code()?, + &runtime_code, ) .map_err(Into::into) } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index d423fdee39b6c..8cb0e304cdad5 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
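In the `call_executor.rs` hunks above, the old `state_runtime_code.runtime_code()?` becomes an explicit `.map_err(sp_blockchain::Error::RuntimeCode)?`, wrapping the failure in a dedicated variant rather than relying on an implicit conversion. A small before/after sketch with illustrative types, not the real sp_state_machine signatures:

#[derive(Debug, thiserror::Error)]
enum ClientError {
    #[error("runtime code error: {0}")]
    RuntimeCode(String),
}

fn runtime_code() -> Result<Vec<u8>, String> {
    Ok(b"\0asm".to_vec())
}

fn check_override() -> Result<Vec<u8>, ClientError> {
    // Before: `runtime_code()?` worked only while a blanket `From<String>` impl existed.
    // After: the underlying error is wrapped explicitly in the dedicated variant.
    runtime_code().map_err(ClientError::RuntimeCode)
}

fn main() {
    assert!(check_override().is_ok());
}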
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -297,7 +297,8 @@ impl Client where config: ClientConfig, ) -> sp_blockchain::Result { if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() { - let genesis_storage = build_genesis_storage.build_storage()?; + let genesis_storage = build_genesis_storage.build_storage() + .map_err(sp_blockchain::Error::Storage)?; let mut op = backend.begin_operation()?; backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?; let state_root = op.reset_storage(genesis_storage)?; @@ -400,7 +401,7 @@ impl Client where storage: &'a dyn ChangesTrieStorage, NumberFor>, min: NumberFor, required_roots_proofs: Mutex, Block::Hash>>, - }; + } impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor> for AccessedRootsRecorder<'a, Block> @@ -880,7 +881,7 @@ impl Client where &state, changes_trie_state.as_ref(), *parent_hash, - )?; + ).map_err(sp_blockchain::Error::Storage)?; if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root @@ -1159,12 +1160,12 @@ impl Client where /// Prepare in-memory header that is used in execution environment. fn prepare_environment_block(&self, parent: &BlockId) -> sp_blockchain::Result { - let parent_header = self.backend.blockchain().expect_header(*parent)?; + let parent_hash = self.backend.blockchain().expect_block_hash_from_id(parent)?; Ok(<::Header as HeaderT>::new( self.backend.blockchain().expect_block_number_from_id(parent)? + One::one(), Default::default(), Default::default(), - parent_header.hash(), + parent_hash, Default::default(), )) } @@ -1305,7 +1306,7 @@ impl BlockBuilderProvider for Client ExecutorProvider for Client where +impl ExecutorProvider for Client where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -1653,7 +1654,6 @@ impl CallApiAt for Client where params.function, &params.arguments, params.overlayed_changes, - params.offchain_changes, Some(params.storage_transaction_cache), params.initialize_block, manager, @@ -1900,8 +1900,7 @@ impl BlockBackend for Client self.body(id) } - fn block(&self, id: &BlockId) -> sp_blockchain::Result>> - { + fn block(&self, id: &BlockId) -> sp_blockchain::Result>> { Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?)
{ (Some(header), Some(extrinsics), justification) => Some(SignedBlock { block: Block::new(header, extrinsics), justification }), @@ -1910,26 +1909,7 @@ impl BlockBackend for Client } fn block_status(&self, id: &BlockId) -> sp_blockchain::Result { - // this can probably be implemented more efficiently - if let BlockId::Hash(ref h) = id { - if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued); - } - } - let hash_and_number = match id.clone() { - BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), - BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), - }; - match hash_and_number { - Some((hash, number)) => { - if self.backend.have_state_at(&hash, number) { - Ok(BlockStatus::InChainWithState) - } else { - Ok(BlockStatus::InChainPruned) - } - } - None => Ok(BlockStatus::Unknown), - } + Client::block_status(self, id) } fn justification(&self, id: &BlockId) -> sp_blockchain::Result> { @@ -1939,6 +1919,14 @@ impl BlockBackend for Client fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result> { self.backend.blockchain().hash(number) } + + fn extrinsic(&self, hash: &Block::Hash) -> sp_blockchain::Result> { + self.backend.blockchain().extrinsic(hash) + } + + fn have_extrinsic(&self, hash: &Block::Hash) -> sp_blockchain::Result { + self.backend.blockchain().have_extrinsic(hash) + } } impl backend::AuxStore for Client diff --git a/client/service/src/client/genesis.rs b/client/service/src/client/genesis.rs index 4df08025e3826..08235f7efb6e3 100644 --- a/client/service/src/client/genesis.rs +++ b/client/service/src/client/genesis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/service/src/client/light.rs b/client/service/src/client/light.rs index 6d4f9aa1c9d1b..5b5c0cb0eb38f 100644 --- a/client/service/src/client/light.rs +++ b/client/service/src/client/light.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/service/src/client/mod.rs b/client/service/src/client/mod.rs index b3aa2fa076af5..06f48048f8f2b 100644 --- a/client/service/src/client/mod.rs +++ b/client/service/src/client/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -23,22 +23,23 @@ //! //! - A database containing the blocks and chain state, generally referred to as //! the [`Backend`](sc_client_api::backend::Backend). -//! - A runtime environment, generally referred to as the [`Executor`](CallExecutor). +//! - A runtime environment, generally referred to as the +//! [`Executor`](sc_client_api::call_executor::CallExecutor). //! //! # Initialization //! //! Creating a [`Client`] is done by calling the `new` method and passing to it a -//! 
[`Backend`](sc_client_api::backend::Backend) and an [`Executor`](CallExecutor). +//! [`Backend`](sc_client_api::backend::Backend) and an +//! [`Executor`](sc_client_api::call_executor::CallExecutor). //! //! The former is typically provided by the `sc-client-db` crate. //! //! The latter typically requires passing one of: //! //! - A [`LocalCallExecutor`] running the runtime locally. -//! - A [`RemoteCallExecutor`](light::call_executor::RemoteCallRequest) that will ask a +//! - A [`RemoteCallExecutor`](sc_client_api::light::RemoteCallRequest) that will ask a //! third-party to perform the executions. -//! - A [`RemoteOrLocalCallExecutor`](light::call_executor::RemoteOrLocalCallExecutor), combination -//! of the two. +//! - A [`RemoteOrLocalCallExecutor`](sc_client_api::light::LocalOrRemote), combination of the two. //! //! Additionally, the fourth generic parameter of the `Client` is a marker type representing //! the ways in which the runtime can interface with the outside. Any code that builds a `Client` diff --git a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs index 1025b9633887d..aca29694fca82 100644 --- a/client/service/src/client/wasm_override.rs +++ b/client/service/src/client/wasm_override.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -37,7 +37,8 @@ //! needed must be provided in the given directory. //! use std::{ - fs, collections::{HashMap, hash_map::DefaultHasher}, path::Path, + fs, collections::{HashMap, hash_map::DefaultHasher}, + path::{Path, PathBuf}, hash::Hasher as _, }; use sp_core::traits::FetchRuntimeCode; @@ -82,6 +83,29 @@ impl FetchRuntimeCode for WasmBlob { } } +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum WasmOverrideError { + #[error("Failed to get runtime version: {0}")] + VersionInvalid(String), + + #[error("WASM override IO error")] + Io(PathBuf, #[source] std::io::Error), + + #[error("Overwriting WASM requires a directory where local \ + WASM is stored. {} is not a directory", .0.display())] + NotADirectory(PathBuf), + + #[error("Duplicate WASM Runtimes found: \n{}\n", .0.join("\n") )] + DuplicateRuntime(Vec), +} + +impl From for sp_blockchain::Error { + fn from(err: WasmOverrideError) -> Self { + Self::Application(Box::new(err)) + } +} + /// Scrapes WASM from a folder and returns WASM from that folder /// if the runtime spec version matches. #[derive(Clone, Debug)] @@ -119,16 +143,13 @@ where /// Scrapes a folder for WASM runtimes. /// Returns a hashmap of the runtime version and wasm runtime code. fn scrape_overrides(dir: &Path, executor: &E) -> Result> { + let handle_err = |e: std::io::Error | -> sp_blockchain::Error { - sp_blockchain::Error::Msg(format!("{}", e.to_string())) + WasmOverrideError::Io(dir.to_owned(), e).into() }; if !dir.is_dir() { - return Err(sp_blockchain::Error::Msg(format!( - "Overwriting WASM requires a directory where \ - local WASM is stored. 
{:?} is not a directory", - dir, - ))); + return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()); } let mut overrides = HashMap::new(); @@ -149,9 +170,7 @@ where } if !duplicates.is_empty() { - let duplicate_file_list = duplicates.join("\n"); - let msg = format!("Duplicate WASM Runtimes found: \n{}\n", duplicate_file_list); - return Err(sp_blockchain::Error::Msg(msg)); + return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()); } Ok(overrides) @@ -164,7 +183,7 @@ where ) -> Result { let mut ext = BasicExternalities::default(); executor.runtime_version(&mut ext, &code.runtime_code(heap_pages)) - .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) + .map_err(|e| WasmOverrideError::VersionInvalid(format!("{:?}", e)).into()) } } @@ -236,14 +255,10 @@ mod tests { let scraped = WasmOverride::scrape_overrides(dir, exec); match scraped { - Err(e) => { - match e { - sp_blockchain::Error::Msg(msg) => { - let is_match = msg - .matches("Duplicate WASM Runtimes found") - .map(ToString::to_string) - .collect::>(); - assert!(is_match.len() >= 1) + Err(sp_blockchain::Error::Application(e)) => { + match e.downcast_ref::() { + Some(WasmOverrideError::DuplicateRuntime(duplicates)) => { + assert_eq!(duplicates.len(), 1); }, _ => panic!("Test should end with Msg Error Variant") } diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 0caf05b2485db..1e316c37dc9a4 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,7 +18,10 @@ //! Service configuration. -pub use sc_client_db::{Database, PruningMode, DatabaseSettingsSrc as DatabaseConfig}; +pub use sc_client_db::{ + Database, PruningMode, DatabaseSettingsSrc as DatabaseConfig, + KeepBlocks, TransactionStorageMode +}; pub use sc_network::Multiaddr; pub use sc_network::config::{ExtTransport, MultiaddrWithPeerId, NetworkConfiguration, Role, NodeKeyConfig}; pub use sc_executor::WasmExecutionMethod; @@ -50,14 +53,20 @@ pub struct Configuration { pub network: NetworkConfiguration, /// Configuration for the keystore. pub keystore: KeystoreConfig, + /// Remote URI to connect to for async keystore support + pub keystore_remote: Option, /// Configuration for the database. pub database: DatabaseConfig, /// Size of internal state cache in Bytes pub state_cache_size: usize, /// Size in percent of cache size dedicated to child tries pub state_cache_child_ratio: Option, - /// Pruning settings. - pub pruning: PruningMode, + /// State pruning settings. + pub state_pruning: PruningMode, + /// Number of blocks to keep in the db. + pub keep_blocks: KeepBlocks, + /// Transaction storage scheme. + pub transaction_storage: TransactionStorageMode, /// Chain configuration. pub chain_spec: Box, /// Wasm execution method. @@ -87,6 +96,15 @@ pub struct Configuration { /// External WASM transport for the telemetry. If `Some`, when connection to a telemetry /// endpoint, this transport will be tried in priority before all others. pub telemetry_external_transport: Option, + /// Telemetry handle. + /// + /// This is a handle to a `TelemetryWorker` instance. It is used to initialize the telemetry for + /// a substrate node. + pub telemetry_handle: Option, + /// Telemetry span. 
+ /// + /// This span is entered for every background task spawned using the TaskManager. + pub telemetry_span: Option, /// The default number of 64KB pages to allocate for Wasm execution pub default_heap_pages: Option, /// Should offchain workers be executed. @@ -103,6 +121,8 @@ pub struct Configuration { pub dev_key_seed: Option, /// Tracing targets pub tracing_targets: Option, + /// Is log filter reloading disabled + pub disable_log_reloading: bool, /// Tracing receiver pub tracing_receiver: sc_tracing::TracingReceiver, /// The size of the instances cache. @@ -187,9 +207,23 @@ impl Configuration { } /// Returns the prometheus metrics registry, if available. - pub fn prometheus_registry<'a>(&'a self) -> Option<&'a Registry> { + pub fn prometheus_registry(&self) -> Option<&Registry> { self.prometheus_config.as_ref().map(|config| &config.registry) } + + /// Returns the network protocol id from the chain spec, or the default. + pub fn protocol_id(&self) -> sc_network::config::ProtocolId { + let protocol_id_full = match self.chain_spec.protocol_id() { + Some(pid) => pid, + None => { + log::warn!("Using default protocol ID {:?} because none is configured in the \ + chain specs", crate::DEFAULT_PROTOCOL_ID + ); + crate::DEFAULT_PROTOCOL_ID + } + }; + sc_network::config::ProtocolId::from(protocol_id_full) + } } /// Available RPC methods. @@ -259,6 +293,13 @@ impl BasePath { BasePath::Permanenent(path) => path.as_path(), } } + + /// Returns the configuration directory inside this base path. + /// + /// The path looks like `$base_path/chains/$chain_id` + pub fn config_dir(&self, chain_id: &str) -> PathBuf { + self.path().join("chains").join(chain_id) + } } impl std::convert::From for BasePath { diff --git a/client/service/src/error.rs b/client/service/src/error.rs index ffe1b39405501..31c3cea4ef43b 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -27,25 +27,38 @@ use sp_blockchain; pub type Result = std::result::Result; /// Service errors. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +#[non_exhaustive] pub enum Error { - /// Client error. - Client(sp_blockchain::Error), - /// IO error. - Io(std::io::Error), - /// Consensus error. - Consensus(sp_consensus::Error), - /// Network error. - Network(sc_network::error::Error), - /// Keystore error. - Keystore(sc_keystore::Error), - /// Best chain selection strategy is missing. - #[display(fmt="Best chain selection strategy (SelectChain) is not provided.")] + #[error(transparent)] + Client(#[from] sp_blockchain::Error), + + #[error(transparent)] + Io(#[from] std::io::Error), + + #[error(transparent)] + Consensus(#[from] sp_consensus::Error), + + #[error(transparent)] + Network(#[from] sc_network::error::Error), + + #[error(transparent)] + Keystore(#[from] sc_keystore::Error), + + #[error("Best chain selection strategy (SelectChain) is not provided.")] SelectChainRequired, - /// Tasks executor is missing. - #[display(fmt="Tasks executor hasn't been provided.")] + + #[error("Tasks executor hasn't been provided.")] TaskExecutorRequired, - /// Other error. 
+ + #[error("Prometheus metrics error")] + Prometheus(#[from] prometheus_endpoint::PrometheusError), + + #[error("Application")] + Application(#[from] Box), + + #[error("Other: {0}")] Other(String), } @@ -55,21 +68,8 @@ impl<'a> From<&'a str> for Error { } } -impl From for Error { - fn from(e: prometheus_endpoint::PrometheusError) -> Self { - Error::Other(format!("Prometheus error: {}", e)) - } -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Client(ref err) => Some(err), - Error::Io(ref err) => Some(err), - Error::Consensus(ref err) => Some(err), - Error::Network(ref err) => Some(err), - Error::Keystore(ref err) => Some(err), - _ => None, - } +impl<'a> From for Error { + fn from(s: String) -> Self { + Error::Other(s) } } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index cb741c2920b06..4880b8cffdafe 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -39,7 +39,6 @@ use std::net::SocketAddr; use std::collections::HashMap; use std::time::Duration; use std::task::Poll; -use parking_lot::Mutex; use futures::{Future, FutureExt, Stream, StreamExt, stream, compat::*}; use sc_network::{NetworkStatus, network_state::NetworkState, PeerId}; @@ -48,7 +47,7 @@ use codec::{Encode, Decode}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use parity_util_mem::MallocSizeOf; -use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}}; +use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver}}; pub use self::error::Error; pub use self::builder::{ @@ -60,6 +59,7 @@ pub use self::builder::{ }; pub use config::{ BasePath, Configuration, DatabaseConfig, PruningMode, Role, RpcMethods, TaskExecutor, TaskType, + KeepBlocks, TransactionStorageMode, }; pub use sc_chain_spec::{ ChainSpec, GenericChainSpec, Properties, RuntimeGenesis, Extension as ChainSpecExtension, @@ -73,13 +73,14 @@ pub use sc_executor::NativeExecutionDispatch; pub use std::{ops::Deref, result::Result, sync::Arc}; #[doc(hidden)] pub use sc_network::config::{ - FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder, TransactionImport, + OnDemand, TransactionImport, TransactionImportFuture, }; pub use sc_tracing::TracingReceiver; pub use task_manager::SpawnTaskHandle; pub use task_manager::TaskManager; pub use sp_consensus::import_queue::ImportQueue; +pub use self::client::{LocalCallExecutor, ClientConfig}; use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; const DEFAULT_PROTOCOL_ID: &str = "sup"; @@ -159,20 +160,7 @@ impl NetworkStatusSinks { } -/// Sinks to propagate telemetry connection established events. -#[derive(Default, Clone)] -pub struct TelemetryConnectionSinks(Arc>>>); - -impl TelemetryConnectionSinks { - /// Get event stream for telemetry connection established events. - pub fn on_connect_stream(&self) -> TracingUnboundedReceiver<()> { - let (sink, stream) =tracing_unbounded("mpsc_telemetry_on_connect"); - self.0.lock().push(sink); - stream - } -} - -/// An imcomplete set of chain components, but enough to run the chain ops subcommands. 
+/// An incomplete set of chain components, but enough to run the chain ops subcommands. pub struct PartialComponents { /// A shared client instance. pub client: Arc, @@ -246,11 +234,11 @@ async fn build_network_future< }; if announce_imported_blocks { - network.service().announce_block(notification.hash, Vec::new()); + network.service().announce_block(notification.hash, None); } - if let sp_consensus::BlockOrigin::Own = notification.origin { - network.service().own_block_imported( + if notification.is_new_best { + network.service().new_best_block_imported( notification.hash, notification.header.number().clone(), ); @@ -400,7 +388,7 @@ fn start_rpc_servers< >( config: &Configuration, mut gen_handler: H, - rpc_metrics: Option<&sc_rpc_server::RpcMetrics> + rpc_metrics: sc_rpc_server::RpcMetrics, ) -> Result, error::Error> { fn maybe_start_server(address: Option, mut start: F) -> Result, io::Error> where F: FnMut(&SocketAddr) -> Result, @@ -433,7 +421,7 @@ fn start_rpc_servers< config.rpc_ipc.as_ref().map(|path| sc_rpc_server::start_ipc( &*path, gen_handler( sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "ipc") + sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ipc") ) )), maybe_start_server( @@ -443,7 +431,7 @@ fn start_rpc_servers< config.rpc_cors.as_ref(), gen_handler( deny_unsafe(&address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "http") + sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "http") ), ), )?.map(|s| waiting::HttpServer(Some(s))), @@ -455,7 +443,7 @@ fn start_rpc_servers< config.rpc_cors.as_ref(), gen_handler( deny_unsafe(&address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "ws") + sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ws") ), ), )?.map(|s| waiting::WsServer(Some(s))), @@ -470,7 +458,7 @@ fn start_rpc_servers< >( _: &Configuration, _: H, - _: Option<&sc_rpc_server::RpcMetrics> + _: sc_rpc_server::RpcMetrics, ) -> Result, error::Error> { Ok(Box::new(())) } @@ -611,6 +599,7 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), + true.into(), None, spawner, client.clone(), diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index d3ad780b5be62..446cce952741d 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index a0aeba3009dee..9a1fd15952e12 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -1,16 +1,21 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
-// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + //! Substrate service tasks management module. use std::{panic, result::Result, pin::Pin}; @@ -19,7 +24,7 @@ use log::{debug, error}; use futures::{ Future, FutureExt, StreamExt, future::{select, Either, BoxFuture, join_all, try_join_all, pending}, - sink::SinkExt, + sink::SinkExt, task::{Context, Poll}, }; use prometheus_endpoint::{ exponential_buckets, register, @@ -29,11 +34,43 @@ use prometheus_endpoint::{ use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; use tracing_futures::Instrument; use crate::{config::{TaskExecutor, TaskType, JoinFuture}, Error}; +use sc_telemetry::TelemetrySpan; mod prometheus_future; #[cfg(test)] mod tests; +/// A wrapper around a `[Option]` and a [`Future`]. +/// +/// The telemetry in Substrate uses a span to identify the telemetry context. The span "infrastructure" +/// is provided by the tracing-crate. Now it is possible to have your own spans as well. To support +/// this with the [`TaskManager`] we have this wrapper. This wrapper enters the telemetry span every +/// time the future is polled and polls the inner future. So, the inner future can still have its +/// own span attached and we get our telemetry span ;) +struct WithTelemetrySpan { + span: Option, + inner: T, +} + +impl WithTelemetrySpan { + fn new(span: Option, inner: T) -> Self { + Self { + span, + inner, + } + } +} + +impl + Unpin> Future for WithTelemetrySpan { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, ctx: &mut Context) -> Poll { + let span = self.span.clone(); + let _enter = span.as_ref().map(|s| s.enter()); + Pin::new(&mut self.inner).poll(ctx) + } +} + /// An handle for spawning tasks in the service. #[derive(Clone)] pub struct SpawnTaskHandle { @@ -41,6 +78,7 @@ pub struct SpawnTaskHandle { executor: TaskExecutor, metrics: Option, task_notifier: TracingUnboundedSender, + telemetry_span: Option, } impl SpawnTaskHandle { @@ -117,7 +155,12 @@ impl SpawnTaskHandle { } }; - let join_handle = self.executor.spawn(Box::pin(future.in_current_span()), task_type); + let future = future.in_current_span().boxed(); + let join_handle = self.executor.spawn( + WithTelemetrySpan::new(self.telemetry_span.clone(), future).boxed(), + task_type, + ); + let mut task_notifier = self.task_notifier.clone(); self.executor.spawn( Box::pin(async move { @@ -223,14 +266,17 @@ pub struct TaskManager { /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. children: Vec, + /// A `TelemetrySpan` used to enter the telemetry span when a task is spawned. + telemetry_span: Option, } impl TaskManager { - /// If a Prometheus registry is passed, it will be used to report statistics about the - /// service tasks. + /// If a Prometheus registry is passed, it will be used to report statistics about the + /// service tasks. 
pub(super) fn new( executor: TaskExecutor, - prometheus_registry: Option<&Registry> + prometheus_registry: Option<&Registry>, + telemetry_span: Option, ) -> Result { let (signal, on_exit) = exit_future::signal(); @@ -259,6 +305,7 @@ impl TaskManager { task_notifier, completion_future, children: Vec::new(), + telemetry_span, }) } @@ -269,6 +316,7 @@ impl TaskManager { executor: self.executor.clone(), metrics: self.metrics.clone(), task_notifier: self.task_notifier.clone(), + telemetry_span: self.telemetry_span.clone(), } } diff --git a/client/service/src/task_manager/prometheus_future.rs b/client/service/src/task_manager/prometheus_future.rs index 53bd59aa7a507..6d2a52354d6ca 100644 --- a/client/service/src/task_manager/prometheus_future.rs +++ b/client/service/src/task_manager/prometheus_future.rs @@ -1,16 +1,21 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + //! Wrapper around a `Future` that reports statistics about when the `Future` is polled. use futures::prelude::*; diff --git a/client/service/src/task_manager/tests.rs b/client/service/src/task_manager/tests.rs index 27d9b0b9e9ad9..257f7db19870f 100644 --- a/client/service/src/task_manager/tests.rs +++ b/client/service/src/task_manager/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,9 +20,10 @@ use crate::config::TaskExecutor; use crate::task_manager::TaskManager; use futures::{future::FutureExt, pin_mut, select}; use parking_lot::Mutex; -use std::any::Any; -use std::sync::Arc; -use std::time::Duration; +use std::{any::Any, sync::Arc, time::Duration}; +use tracing_subscriber::{layer::{SubscriberExt, Context}, Layer}; +use tracing::{subscriber::Subscriber, span::{Attributes, Id, Record, Span}, event::Event}; +use sc_telemetry::TelemetrySpan; #[derive(Clone, Debug)] struct DropTester(Arc>); @@ -81,13 +82,17 @@ async fn run_background_task_blocking(duration: Duration, _keep_alive: impl Any) } } +fn new_task_manager(task_executor: TaskExecutor) -> TaskManager { + TaskManager::new(task_executor, None, None).unwrap() +} + #[test] fn ensure_tasks_are_awaited_on_shutdown() { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let task_manager = TaskManager::new(task_executor, None).unwrap(); + let task_manager = new_task_manager(task_executor); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); @@ -106,7 +111,7 @@ fn ensure_keep_alive_during_shutdown() { let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor, None).unwrap(); + let mut task_manager = new_task_manager(task_executor); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); task_manager.keep_alive(drop_tester.new_ref()); @@ -125,7 +130,7 @@ fn ensure_blocking_futures_are_awaited_on_shutdown() { let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let task_manager = TaskManager::new(task_executor, None).unwrap(); + let task_manager = new_task_manager(task_executor); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); spawn_handle.spawn( @@ -150,7 +155,7 @@ fn ensure_no_task_can_be_spawn_after_terminate() { let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor, None).unwrap(); + let mut task_manager = new_task_manager(task_executor); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); @@ -171,7 +176,7 @@ fn ensure_task_manager_future_ends_when_task_manager_terminated() { let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor, None).unwrap(); + let mut task_manager = new_task_manager(task_executor); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); @@ -192,7 +197,7 @@ fn ensure_task_manager_future_ends_with_error_when_essential_task_fails() { let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - 
let mut task_manager = TaskManager::new(task_executor, None).unwrap(); + let mut task_manager = new_task_manager(task_executor); let spawn_handle = task_manager.spawn_handle(); let spawn_essential_handle = task_manager.spawn_essential_handle(); let drop_tester = DropTester::new(); @@ -215,10 +220,10 @@ fn ensure_children_tasks_ends_when_task_manager_terminated() { let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); - let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let mut task_manager = new_task_manager(task_executor.clone()); + let child_1 = new_task_manager(task_executor.clone()); let spawn_handle_child_1 = child_1.spawn_handle(); - let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_2 = new_task_manager(task_executor.clone()); let spawn_handle_child_2 = child_2.spawn_handle(); task_manager.add_child(child_1); task_manager.add_child(child_2); @@ -244,11 +249,11 @@ fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails() let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); - let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let mut task_manager = new_task_manager(task_executor.clone()); + let child_1 = new_task_manager(task_executor.clone()); let spawn_handle_child_1 = child_1.spawn_handle(); let spawn_essential_handle_child_1 = child_1.spawn_essential_handle(); - let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_2 = new_task_manager(task_executor.clone()); let spawn_handle_child_2 = child_2.spawn_handle(); task_manager.add_child(child_1); task_manager.add_child(child_2); @@ -275,10 +280,10 @@ fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); - let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let mut task_manager = new_task_manager(task_executor.clone()); + let child_1 = new_task_manager(task_executor.clone()); let spawn_handle_child_1 = child_1.spawn_handle(); - let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_2 = new_task_manager(task_executor.clone()); let spawn_handle_child_2 = child_2.spawn_handle(); task_manager.add_child(child_1); task_manager.add_child(child_2); @@ -308,3 +313,94 @@ fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { runtime.block_on(task_manager.clean_shutdown()); assert_eq!(drop_tester, 0); } + +struct TestLayer { + spans_entered: Arc>>, + spans: Arc>>, +} + +impl Layer for TestLayer { + fn new_span(&self, attrs: &Attributes<'_>, id: &Id, _ctx: Context) { + self.spans.lock().insert(id.clone(), attrs.metadata().name().to_string()); + } + + fn on_record(&self, _: &Id, _: &Record<'_>, _: Context) {} + + fn on_event(&self, _: &Event<'_>, _: Context) {} + + fn on_enter(&self, span: &Id, _: Context) { + let name = self.spans.lock().get(span).unwrap().clone(); + self.spans_entered.lock().push(name); + } + + fn on_exit(&self, _: &Id, _: Context) {} + + fn on_close(&self, _: Id, _: Context) {} +} + 
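For context before the tests that follow: they verify the poll-time span entering done by `WithTelemetrySpan` above. A minimal, standalone sketch of that pattern, assuming only the `tracing` crate (the wrapper name and its exact shape here are illustrative, not taken from this diff):

use std::{future::Future, pin::Pin, task::{Context, Poll}};

/// Re-enters `span` for the duration of every poll, so whatever the inner
/// future does while being polled is attributed to that span.
struct EnterSpanOnPoll<F> {
    span: tracing::Span,
    inner: F,
}

impl<F: Future + Unpin> Future for EnterSpanOnPoll<F> {
    type Output = F::Output;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Clone first so the enter guard borrows a local value rather than `self`.
        let span = self.span.clone();
        let _guard = span.enter();
        Pin::new(&mut self.inner).poll(cx)
    }
}

Cloning before entering is the same trick `WithTelemetrySpan::poll` uses: it keeps the guard's borrow local while still polling the inner future through `Pin::new(&mut self.inner)`.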
+type TestSubscriber = tracing_subscriber::layer::Layered< + TestLayer, + tracing_subscriber::fmt::Subscriber +>; + +fn setup_subscriber() -> ( + TestSubscriber, + Arc<Mutex<Vec<String>>>, +) { + let spans_entered = Arc::new(Mutex::new(Default::default())); + let layer = TestLayer { + spans: Arc::new(Mutex::new(Default::default())), + spans_entered: spans_entered.clone(), + }; + let subscriber = tracing_subscriber::fmt().finish().with(layer); + (subscriber, spans_entered) +} + +#[test] +fn telemetry_span_is_forwarded_to_task() { + let (subscriber, spans_entered) = setup_subscriber(); + let _sub_guard = tracing::subscriber::set_global_default(subscriber); + + let telemetry_span = TelemetrySpan::new(); + + let span = tracing::info_span!("test"); + let _enter = span.enter(); + + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor = TaskExecutor::from(move |fut, _| handle.spawn(fut).map(|_| ())); + let task_manager = TaskManager::new(task_executor, None, Some(telemetry_span.clone())).unwrap(); + + let (sender, receiver) = futures::channel::oneshot::channel(); + let spawn_handle = task_manager.spawn_handle(); + + let span = span.clone(); + task_manager.spawn_handle().spawn( + "test", + async move { + assert_eq!(span, Span::current()); + spawn_handle.spawn("test-nested", async move { + assert_eq!(span, Span::current()); + sender.send(()).unwrap(); + }.boxed()); + }.boxed(), + ); + + // We need to exit the span here. If tokio is not running with multithreading, this + // would lead to duplicate spans being "active" and forwarding the wrong one. + drop(_enter); + runtime.block_on(receiver).unwrap(); + runtime.block_on(task_manager.clean_shutdown()); + drop(runtime); + + let spans = spans_entered.lock(); + // We entered the "test" span once outside of the futures, plus the telemetry span and the + // "test" span in both the spawned future and the nested future. So, 5 span entries should + // have been recorded.
+ assert_eq!(5, spans.len()); + + assert_eq!(spans[0], "test"); + assert_eq!(spans[1], telemetry_span.span().metadata().unwrap().name()); + assert_eq!(spans[2], "test"); + assert_eq!(spans[3], telemetry_span.span().metadata().unwrap().name()); + assert_eq!(spans[4], "test"); +} diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 84ac84e630d00..e55320d6c5fb7 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -18,27 +18,27 @@ tokio = "0.1.22" futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" fdlimit = "0.2.1" -parking_lot = "0.10.0" -sc-light = { version = "2.0.0", path = "../../light" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -sp-externalities = { version = "0.8.0", path = "../../../primitives/externalities" } -sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } -sp-storage = { version = "2.0.0", path = "../../../primitives/storage" } -sc-client-db = { version = "0.8.0", default-features = false, path = "../../db" } +parking_lot = "0.11.1" +sc-light = { version = "3.0.0", path = "../../light" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sp-state-machine = { version = "0.9.0", path = "../../../primitives/state-machine" } +sp-externalities = { version = "0.9.0", path = "../../../primitives/externalities" } +sp-trie = { version = "3.0.0", path = "../../../primitives/trie" } +sp-storage = { version = "3.0.0", path = "../../../primitives/storage" } +sc-client-db = { version = "0.9.0", default-features = false, path = "../../db" } futures = { version = "0.3.1", features = ["compat"] } -sc-service = { version = "0.8.0", default-features = false, features = ["test-helpers"], path = "../../service" } -sc-network = { version = "0.8.0", path = "../../network" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +sc-service = { version = "0.9.0", default-features = false, features = ["test-helpers"], path = "../../service" } +sc-network = { version = "0.9.0", path = "../../network" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-transaction-pool = { version = "3.0.0", path = "../../../primitives/transaction-pool" } substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sc-block-builder = { version = "0.8.0", path = "../../block-builder" } -sc-executor = { version = "0.8.0", path = "../../executor" } -sp-panic-handler = { version = "2.0.0", path = "../../../primitives/panic-handler" } -parity-scale-codec = "1.3.4" -sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } +sc-client-api = { version = "3.0.0", path = "../../api" } +sc-block-builder = { version = "0.9.0", 
path = "../../block-builder" } +sc-executor = { version = "0.9.0", path = "../../executor" } +sp-panic-handler = { version = "3.0.0", path = "../../../primitives/panic-handler" } +parity-scale-codec = "2.0.0" +sp-tracing = { version = "3.0.0", path = "../../../primitives/tracing" } diff --git a/client/service/test/src/client/db.rs b/client/service/test/src/client/db.rs index 36d49732246e5..a86e8f2de467c 100644 --- a/client/service/test/src/client/db.rs +++ b/client/service/test/src/client/db.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index f38aef008e11c..b6287741fdf34 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -36,7 +36,7 @@ use parking_lot::Mutex; use substrate_test_runtime_client::{ runtime::{Hash, Block, Header}, TestClient, ClientBlockImportExt, }; -use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder, OffchainOverlayedChanges}; +use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder}; use sp_consensus::BlockOrigin; use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVersion}; use sp_core::{H256, NativeOrEncoded, testing::TaskExecutor}; @@ -223,7 +223,6 @@ impl CallExecutor for DummyCallExecutor { _method: &str, _call_data: &[u8], _changes: &RefCell, - _offchain_changes: &RefCell, _storage_transaction_cache: Option<&RefCell< StorageTransactionCache< Block, diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 23d6a34297328..7498289c7be1a 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -31,7 +31,9 @@ use substrate_test_runtime_client::{ use sc_client_api::{ StorageProvider, BlockBackend, in_mem, BlockchainEvents, }; -use sc_client_db::{Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode}; +use sc_client_db::{ + Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode, KeepBlocks, TransactionStorageMode +}; use sc_block_builder::BlockBuilderProvider; use sc_service::client::{self, Client, LocalCallExecutor, new_in_mem}; use sp_runtime::traits::{ @@ -39,7 +41,7 @@ use sp_runtime::traits::{ }; use substrate_test_runtime::TestAPI; use sp_state_machine::backend::Backend as _; -use sp_api::{ProvideRuntimeApi, OffchainOverlayedChanges}; +use sp_api::ProvideRuntimeApi; use sp_core::{H256, ChangesTrieConfiguration, blake2_256, testing::TaskExecutor}; use std::collections::{HashMap, HashSet}; use std::sync::Arc; @@ -161,7 +163,6 @@ fn construct_block( }; let hash = header.hash(); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); let task_executor = Box::new(TaskExecutor::new()); @@ -170,7 +171,6 @@ fn construct_block( backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "Core_initialize_block", &header.encode(), @@ -186,7 +186,6 @@ fn construct_block( backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "BlockBuilder_apply_extrinsic", &tx.encode(), @@ -202,7 +201,6 @@ fn construct_block( backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "BlockBuilder_finalize_block", &[], @@ -250,13 +248,11 @@ fn construct_genesis_should_work_with_native() { let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let _ = StateMachine::new( &backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "Core_execute_block", &b1data, @@ -286,13 +282,11 @@ fn construct_genesis_should_work_with_wasm() { let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let _ = StateMachine::new( &backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "Core_execute_block", &b1data, @@ -322,13 +316,11 @@ fn construct_genesis_with_bad_transaction_should_panic() { let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let r = StateMachine::new( &backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "Core_execute_block", &b1data, @@ -1275,7 +1267,9 @@ fn doesnt_import_blocks_that_revert_finality() { DatabaseSettings { state_cache_size: 1 << 20, state_cache_child_ratio: None, - pruning: 
PruningMode::ArchiveAll, + state_pruning: PruningMode::ArchiveAll, + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, source: DatabaseSettingsSrc::RocksDb { path: tmp.path().into(), cache_size: 1024, @@ -1476,7 +1470,9 @@ fn returns_status_for_pruned_blocks() { DatabaseSettings { state_cache_size: 1 << 20, state_cache_child_ratio: None, - pruning: PruningMode::keep_blocks(1), + state_pruning: PruningMode::keep_blocks(1), + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, source: DatabaseSettingsSrc::RocksDb { path: tmp.path().into(), cache_size: 1024, diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 8a9f0ace171d8..a42dba84dfeae 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -35,6 +35,7 @@ use sc_service::{ GenericChainSpec, ChainSpecExtension, Configuration, + KeepBlocks, TransactionStorageMode, config::{BasePath, DatabaseConfig, KeystoreConfig}, RuntimeGenesis, Role, @@ -239,6 +240,7 @@ fn node_config( make_block_and_import(&first_service, first_user_data); } - network.full_nodes[0].1.network().update_chain(); + let info = network.full_nodes[0].1.client().info(); + network.full_nodes[0].1.network().new_best_block_imported(info.best_hash, info.best_number); network.full_nodes[0].3.clone() }; diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 4d3e736d9539e..d61dd7fc125a1 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-state-db" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,10 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parking_lot = "0.10.0" -log = "0.4.8" -sc-client-api = { version = "2.0.0", path = "../api" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } +thiserror = "1.0.21" +parking_lot = "0.11.1" +log = "0.4.11" +sc-client-api = { version = "3.0.0", path = "../api" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 61470894e487e..dd2baf9d18ace 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -107,7 +107,7 @@ impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::Db(e) => e.fmt(f), - Error::Decoding(e) => write!(f, "Error decoding sliceable value: {}", e.what()), + Error::Decoding(e) => write!(f, "Error decoding sliceable value: {}", e), Error::InvalidBlock => write!(f, "Trying to canonicalize invalid block"), Error::InvalidBlockNumber => write!(f, "Trying to insert block with invalid number"), Error::InvalidParent => write!(f, "Trying to insert block with unknown parent"), diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index d77f20c50d05f..551bf5fb860c7 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 69b07c285fad8..0c682d8954b13 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index 11ce4ad822620..e1bb6d01c37e4 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index 8da372db94ffc..3ec48ac9ec570 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-sync-state-rpc" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "A RPC handler to create sync states for light clients." 
edition = "2018" @@ -13,15 +13,16 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +thiserror = "1.0.21" jsonrpc-core = "15.0" jsonrpc-core-client = "15.0" jsonrpc-derive = "15.0" -sc-chain-spec = { version = "2.0.0", path = "../chain-spec" } -sc-client-api = { version = "2.0.0", path = "../api" } -sc-consensus-babe = { version = "0.8.0", path = "../consensus/babe" } -sc-consensus-epochs = { version = "0.8.0", path = "../consensus/epochs" } -sc-finality-grandpa = { version = "0.8.0", path = "../finality-grandpa" } -sc-rpc-api = { version = "0.8.0", path = "../rpc-api" } +sc-chain-spec = { version = "3.0.0", path = "../chain-spec" } +sc-client-api = { version = "3.0.0", path = "../api" } +sc-consensus-babe = { version = "0.9.0", path = "../consensus/babe" } +sc-consensus-epochs = { version = "0.9.0", path = "../consensus/epochs" } +sc-finality-grandpa = { version = "0.9.0", path = "../finality-grandpa" } +sc-rpc-api = { version = "0.9.0", path = "../rpc-api" } serde_json = "1.0.58" -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } diff --git a/client/sync-state-rpc/src/lib.rs b/client/sync-state-rpc/src/lib.rs index fa433e5e31d2d..8466523116440 100644 --- a/client/sync-state-rpc/src/lib.rs +++ b/client/sync-state-rpc/src/lib.rs @@ -1,22 +1,26 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! A RPC handler to create sync states for light clients. //! Currently only usable with BABE + GRANDPA. 
+#![deny(unused_crate_dependencies)] + use sp_runtime::traits::{Block as BlockT, NumberFor}; use sp_blockchain::HeaderBackend; use std::sync::Arc; @@ -28,12 +32,27 @@ type SharedAuthoritySet = sc_finality_grandpa::SharedAuthoritySet<::Hash, NumberFor>; type SharedEpochChanges = sc_consensus_epochs::SharedEpochChanges; -struct Error(sp_blockchain::Error); +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +enum Error { + #[error(transparent)] + Blockchain(#[from] sp_blockchain::Error), + + #[error("Failed to load the block weight for block {0:?}")] + LoadingBlockWeightFailed(::Hash), + + #[error("JsonRpc error: {0}")] + JsonRpc(String), +} -impl From for jsonrpc_core::Error { - fn from(error: Error) -> Self { +impl From> for jsonrpc_core::Error { + fn from(error: Error) -> Self { + let message = match error { + Error::JsonRpc(s) => s, + _ => error.to_string(), + }; jsonrpc_core::Error { - message: error.0.to_string(), + message, code: jsonrpc_core::ErrorCode::ServerError(1), data: None, } @@ -76,20 +95,16 @@ impl SyncStateRpcHandler } } - fn build_sync_state(&self) -> Result, sp_blockchain::Error> { + fn build_sync_state(&self) -> Result, Error> { let finalized_hash = self.client.info().finalized_hash; let finalized_header = self.client.header(BlockId::Hash(finalized_hash))? - .ok_or_else(|| sp_blockchain::Error::Msg( - format!("Failed to get the header for block {:?}", finalized_hash) - ))?; + .ok_or_else(|| sp_blockchain::Error::MissingHeader(finalized_hash.to_string()))?; let finalized_block_weight = sc_consensus_babe::aux_schema::load_block_weight( - &*self.client, - finalized_hash, - )? - .ok_or_else(|| sp_blockchain::Error::Msg( - format!("Failed to load the block weight for block {:?}", finalized_hash) - ))?; + &*self.client, + finalized_hash, + )? 
+ .ok_or_else(|| Error::LoadingBlockWeightFailed(finalized_hash))?; Ok(sc_chain_spec::LightSyncState { finalized_block_header: finalized_header, @@ -114,15 +129,16 @@ impl SyncStateRpcApi for SyncStateRpcHandler let mut chain_spec = self.chain_spec.cloned_box(); - let sync_state = self.build_sync_state().map_err(Error)?; + let sync_state = self.build_sync_state() + .map_err(map_error::>)?; chain_spec.set_light_sync_state(sync_state.to_serializable()); - let string = chain_spec.as_json(raw).map_err(map_error)?; + let string = chain_spec.as_json(raw).map_err(map_error::)?; - serde_json::from_str(&string).map_err(|err| map_error(err.to_string())) + serde_json::from_str(&string).map_err(|err| map_error::(err)) } } -fn map_error(error: String) -> jsonrpc_core::Error { - Error(sp_blockchain::Error::Msg(error)).into() +fn map_error(error: S) -> jsonrpc_core::Error { + Error::::JsonRpc(error.to_string()).into() } diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 18812a8c71e43..23b6936ff4057 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-telemetry" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] description = "Telemetry utils" edition = "2018" @@ -15,17 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parking_lot = "0.10.0" -futures = "0.3.4" -futures-timer = "3.0.1" -wasm-timer = "0.2.0" -libp2p = { version = "0.29.1", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } +parking_lot = "0.11.1" +futures = "0.3.9" +wasm-timer = "0.2.5" +libp2p = { version = "0.34.0", default-features = false, features = ["dns", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.8" -pin-project = "0.4.6" +pin-project = "1.0.4" rand = "0.7.2" serde = { version = "1.0.101", features = ["derive"] } -slog = { version = "2.5.2", features = ["nested-values"] } -slog-json = { version = "2.3.0", features = ["nested-values"] } -slog-scope = "4.1.2" take_mut = "0.2.2" void = "1.0.2" +tracing = "0.1.10" +tracing-subscriber = "0.2.13" +serde_json = "1.0.41" +sp-utils = { version = "3.0.0", path = "../../primitives/utils" } +chrono = "0.4.19" diff --git a/client/telemetry/README.md b/client/telemetry/README.md index 8fdf9e500722d..2e3e19bd2f628 100644 --- a/client/telemetry/README.md +++ b/client/telemetry/README.md @@ -1,45 +1,21 @@ -Telemetry utilities. +# sc-telemetry -Calling `init_telemetry` registers a global `slog` logger using `slog_scope::set_global_logger`. -After that, calling `slog_scope::with_logger` will return a logger that sends information to -the telemetry endpoints. The `telemetry!` macro is a short-cut for calling -`slog_scope::with_logger` followed with `slog_log!`. +Substrate's client telemetry is a part of substrate that allows ingesting telemetry data +with for example [Polkadot telemetry](https://github.com/paritytech/substrate-telemetry). -Note that you are supposed to only ever use `telemetry!` and not `slog_scope::with_logger` at -the moment. Substrate may eventually be reworked to get proper `slog` support, including sending -information to the telemetry. +It works using Tokio's [tracing](https://github.com/tokio-rs/tracing/) library. The telemetry +information uses tracing's logging to report the telemetry data which is then retrieved by a +tracing `Layer`. 
This layer will then send the data through an asynchronous channel to a +background task called [`TelemetryWorker`] which will send the information to the configured +remote telemetry servers. -The [`Telemetry`] struct implements `Stream` and must be polled regularly (or sent to a -background thread/task) in order for the telemetry to properly function. Dropping the object -will also deregister the global logger and replace it with a logger that discards messages. -The `Stream` generates [`TelemetryEvent`]s. +If multiple substrate nodes are running in the same process, it uses a `tracing::Span` to +identify which substrate node is reporting the telemetry. Every task spawned using sc-service's +`TaskManager` automatically inherit this span. -> **Note**: Cloning the [`Telemetry`] and polling from multiple clones has an unspecified behaviour. +Substrate's nodes initialize/register with the [`TelemetryWorker`] using a [`TelemetryHandle`]. +This handle can be cloned and passed around. It uses an asynchronous channel to communicate with +the running [`TelemetryWorker`] dedicated to registration. Registering can happen at any point +in time during the process execution. -# Example - -```rust -use futures::prelude::*; - -let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { - endpoints: sc_telemetry::TelemetryEndpoints::new(vec![ - // The `0` is the maximum verbosity level of messages to send to this endpoint. - ("wss://example.com".into(), 0) - ]).expect("Invalid URL or multiaddr provided"), - // Can be used to pass an external implementation of WebSockets. - wasm_external_transport: None, -}); - -// The `telemetry` object implements `Stream` and must be processed. -std::thread::spawn(move || { - futures::executor::block_on(telemetry.for_each(|_| future::ready(()))); -}); - -// Sends a message on the telemetry. -sc_telemetry::telemetry!(sc_telemetry::SUBSTRATE_INFO; "test"; - "foo" => "bar", -) -``` - - -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file +License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/client/telemetry/src/async_record.rs b/client/telemetry/src/async_record.rs deleted file mode 100644 index 34b7c1435afa1..0000000000000 --- a/client/telemetry/src/async_record.rs +++ /dev/null @@ -1,155 +0,0 @@ -//! # Internal types to ssync drain slog -//! FIXME: REMOVE THIS ONCE THE PR WAS MERGE -//! 
https://github.com/slog-rs/async/pull/14 - -use slog::{Record, RecordStatic, Level, SingleKV, KV, BorrowedKV}; -use slog::{Serializer, OwnedKVList, Key}; - -use std::fmt; -use take_mut::take; - -struct ToSendSerializer { - kv: Box, -} - -impl ToSendSerializer { - fn new() -> Self { - ToSendSerializer { kv: Box::new(()) } - } - - fn finish(self) -> Box { - self.kv - } -} - -impl Serializer for ToSendSerializer { - fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_unit(&mut self, key: Key) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, ())))); - Ok(()) - } - fn emit_none(&mut self, key: Key) -> slog::Result { - let val: Option<()> = None; - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_char(&mut self, key: Key, val: char) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i8(&mut self, key: Key, val: i8) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_str(&mut self, key: Key, val: &str) -> slog::Result { - let val = val.to_owned(); - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_arguments( - &mut self, - key: Key, - val: &fmt::Arguments, - ) -> slog::Result { - let val = fmt::format(*val); - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - - fn emit_serde(&mut self, key: Key, value: &dyn slog::SerdeValue) -> slog::Result { - let val = value.to_sendable(); - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } -} - -pub(crate) struct AsyncRecord { - msg: String, - level: Level, - location: Box, - tag: String, - logger_values: OwnedKVList, - kv: Box, -} - -impl AsyncRecord { - /// Serializes a `Record` and an `OwnedKVList`. 
- pub fn from(record: &Record, logger_values: &OwnedKVList) -> Self { - let mut ser = ToSendSerializer::new(); - record - .kv() - .serialize(record, &mut ser) - .expect("`ToSendSerializer` can't fail"); - - AsyncRecord { - msg: fmt::format(*record.msg()), - level: record.level(), - location: Box::new(*record.location()), - tag: String::from(record.tag()), - logger_values: logger_values.clone(), - kv: ser.finish(), - } - } - - /// Deconstruct this `AsyncRecord` into a record and `OwnedKVList`. - pub fn as_record_values(&self, mut f: impl FnMut(&Record, &OwnedKVList)) { - let rs = RecordStatic { - location: &*self.location, - level: self.level, - tag: &self.tag, - }; - - f(&Record::new( - &rs, - &format_args!("{}", self.msg), - BorrowedKV(&self.kv), - ), &self.logger_values) - } -} diff --git a/client/telemetry/src/endpoints.rs b/client/telemetry/src/endpoints.rs new file mode 100644 index 0000000000000..fe4fa23974a64 --- /dev/null +++ b/client/telemetry/src/endpoints.rs @@ -0,0 +1,126 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use libp2p::Multiaddr; +use serde::{Deserialize, Deserializer, Serialize}; + +/// List of telemetry servers we want to talk to. Contains the URL of the server, and the +/// maximum verbosity level. +/// +/// The URL string can be either a URL or a multiaddress. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct TelemetryEndpoints( + #[serde(deserialize_with = "url_or_multiaddr_deser")] + pub(crate) Vec<(Multiaddr, u8)>, +); + +/// Custom deserializer for TelemetryEndpoints, used to convert urls or multiaddr to multiaddr. +fn url_or_multiaddr_deser<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + Vec::<(String, u8)>::deserialize(deserializer)? + .iter() + .map(|e| { + url_to_multiaddr(&e.0) + .map_err(serde::de::Error::custom) + .map(|m| (m, e.1)) + }) + .collect() +} + +impl TelemetryEndpoints { + /// Create a `TelemetryEndpoints` based on a list of `(String, u8)`. + pub fn new(endpoints: Vec<(String, u8)>) -> Result { + let endpoints: Result, libp2p::multiaddr::Error> = endpoints + .iter() + .map(|e| Ok((url_to_multiaddr(&e.0)?, e.1))) + .collect(); + endpoints.map(Self) + } +} + +impl TelemetryEndpoints { + /// Return `true` if there are no telemetry endpoints, `false` otherwise. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +/// Parses a WebSocket URL into a libp2p `Multiaddr`. +fn url_to_multiaddr(url: &str) -> Result { + // First, assume that we have a `Multiaddr`. + let parse_error = match url.parse() { + Ok(ma) => return Ok(ma), + Err(err) => err, + }; + + // If not, try the `ws://path/url` format. 
+ if let Ok(ma) = libp2p::multiaddr::from_url(url) { + return Ok(ma); + } + + // If we have no clue about the format of that string, assume that we were expecting a + // `Multiaddr`. + Err(parse_error) +} + +#[cfg(test)] +mod tests { + use super::url_to_multiaddr; + use super::TelemetryEndpoints; + use libp2p::Multiaddr; + + #[test] + fn valid_endpoints() { + let endp = vec![ + ("wss://telemetry.polkadot.io/submit/".into(), 3), + ("/ip4/80.123.90.4/tcp/5432".into(), 4), + ]; + let telem = + TelemetryEndpoints::new(endp.clone()).expect("Telemetry endpoint should be valid"); + let mut res: Vec<(Multiaddr, u8)> = vec![]; + for (a, b) in endp.iter() { + res.push(( + url_to_multiaddr(a).expect("provided url should be valid"), + *b, + )) + } + assert_eq!(telem.0, res); + } + + #[test] + fn invalid_endpoints() { + let endp = vec![ + ("/ip4/...80.123.90.4/tcp/5432".into(), 3), + ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4), + ]; + let telem = TelemetryEndpoints::new(endp); + assert!(telem.is_err()); + } + + #[test] + fn valid_and_invalid_endpoints() { + let endp = vec![ + ("/ip4/80.123.90.4/tcp/5432".into(), 3), + ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4), + ]; + let telem = TelemetryEndpoints::new(endp); + assert!(telem.is_err()); + } +} diff --git a/client/telemetry/src/layer.rs b/client/telemetry/src/layer.rs new file mode 100644 index 0000000000000..0ce3f97620da9 --- /dev/null +++ b/client/telemetry/src/layer.rs @@ -0,0 +1,149 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{initialize_transport, TelemetryWorker}; +use futures::channel::mpsc; +use libp2p::wasm_ext::ExtTransport; +use parking_lot::Mutex; +use std::convert::TryInto; +use std::io; +use tracing::{Event, Id, Subscriber}; +use tracing_subscriber::{layer::Context, registry::LookupSpan, Layer}; + +/// Span name used to report the telemetry. +pub const TELEMETRY_LOG_SPAN: &str = "telemetry-logger"; + +/// `Layer` that handles the logs for telemetries. +#[derive(Debug)] +pub struct TelemetryLayer(Mutex>); + +impl TelemetryLayer { + /// Create a new [`TelemetryLayer`] and [`TelemetryWorker`]. + /// + /// The `buffer_size` defaults to 16. + /// + /// The [`ExtTransport`] is used in WASM contexts where we need some binding between the + /// networking provided by the operating system or environment and libp2p. + /// + /// > **Important**: Each individual call to `write` corresponds to one message. There is no + /// > internal buffering going on. In the context of WebSockets, each `write` + /// > must be one individual WebSockets frame. 
+ pub fn new( + buffer_size: Option, + telemetry_external_transport: Option, + ) -> io::Result<(Self, TelemetryWorker)> { + let transport = initialize_transport(telemetry_external_transport)?; + let worker = TelemetryWorker::new(buffer_size.unwrap_or(16), transport); + let sender = worker.message_sender(); + Ok((Self(Mutex::new(sender)), worker)) + } +} + +impl Layer for TelemetryLayer +where + S: Subscriber + for<'a> LookupSpan<'a>, +{ + fn on_event(&self, event: &Event<'_>, ctx: Context) { + if event.metadata().target() != TELEMETRY_LOG_SPAN { + return; + } + + if let Some(span) = ctx.lookup_current() { + let parents = span.parents(); + + if let Some(span) = std::iter::once(span) + .chain(parents) + .find(|x| x.name() == TELEMETRY_LOG_SPAN) + { + let id = span.id(); + let mut attrs = TelemetryAttrs::new(id.clone()); + let mut vis = TelemetryAttrsVisitor(&mut attrs); + event.record(&mut vis); + + if let TelemetryAttrs { + verbosity: Some(verbosity), + json: Some(json), + .. + } = attrs + { + match self.0.lock().try_send(( + id, + verbosity + .try_into() + .expect("telemetry log message verbosity are u8; qed"), + json, + )) { + Err(err) if err.is_full() => eprintln!("Telemetry buffer overflowed!"), + _ => {} + } + } else { + // NOTE: logging in this function doesn't work + eprintln!( + "missing fields in telemetry log: {:?}. This can happen if \ + `tracing::info_span!` is (mis-)used with the telemetry target \ + directly; you should use the `telemetry!` macro.", + event, + ); + } + } + } + } +} + +#[derive(Debug)] +struct TelemetryAttrs { + verbosity: Option, + json: Option, + id: Id, +} + +impl TelemetryAttrs { + fn new(id: Id) -> Self { + Self { + verbosity: None, + json: None, + id, + } + } +} + +#[derive(Debug)] +struct TelemetryAttrsVisitor<'a>(&'a mut TelemetryAttrs); + +impl<'a> tracing::field::Visit for TelemetryAttrsVisitor<'a> { + fn record_debug(&mut self, _field: &tracing::field::Field, _value: &dyn std::fmt::Debug) { + // noop + } + + fn record_u64(&mut self, field: &tracing::field::Field, value: u64) { + if field.name() == "verbosity" { + (*self.0).verbosity = Some(value); + } + } + + fn record_str(&mut self, field: &tracing::field::Field, value: &str) { + if field.name() == "json" { + (*self.0).json = Some(format!( + r#"{{"id":{},"ts":{:?},"payload":{}}}"#, + self.0.id.into_u64(), + chrono::Local::now().to_rfc3339().to_string(), + value, + )); + } + } +} diff --git a/client/telemetry/src/lib.rs b/client/telemetry/src/lib.rs index 6a5ac0e0cb312..b398ee86de4ec 100644 --- a/client/telemetry/src/lib.rs +++ b/client/telemetry/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,339 +16,482 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Telemetry utilities. +//! Substrate's client telemetry is a part of substrate that allows ingesting telemetry data +//! with for example [Polkadot telemetry](https://github.com/paritytech/substrate-telemetry). //! -//! Calling `init_telemetry` registers a global `slog` logger using `slog_scope::set_global_logger`. -//! After that, calling `slog_scope::with_logger` will return a logger that sends information to -//! the telemetry endpoints. The `telemetry!` macro is a short-cut for calling -//! 
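To make the data flow concrete: the layer above only reacts to events whose target is `telemetry-logger` and that are emitted inside a span of the same name, and it expects exactly the two fields (`verbosity` and `json`) produced by the `telemetry!` macro further down. A sketch of the equivalent raw call, with made-up payload values, followed by the shape the layer forwards to the worker:

fn emit_raw_telemetry_event() {
    // Roughly what `telemetry!(SUBSTRATE_INFO; "example.message"; "foo" => "bar")`
    // boils down to; it must run while a `TelemetrySpan` is entered.
    let verbosity: u8 = SUBSTRATE_INFO;
    tracing::info!(
        target: TELEMETRY_LOG_SPAN,
        verbosity,
        json = r#"{"foo":"bar","msg":"example.message"}"#,
    );
    // The layer then wraps the payload before handing it to the worker, roughly:
    // {"id":<span id>,"ts":"<local RFC 3339 timestamp>","payload":{"foo":"bar","msg":"example.message"}}
}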
`slog_scope::with_logger` followed with `slog_log!`. +//! It works using Tokio's [tracing](https://github.com/tokio-rs/tracing/) library. The telemetry +//! information uses tracing's logging to report the telemetry data which is then retrieved by a +//! tracing `Layer`. This layer will then send the data through an asynchronous channel to a +//! background task called [`TelemetryWorker`] which will send the information to the configured +//! remote telemetry servers. //! -//! Note that you are supposed to only ever use `telemetry!` and not `slog_scope::with_logger` at -//! the moment. Substrate may eventually be reworked to get proper `slog` support, including sending -//! information to the telemetry. -//! -//! The [`Telemetry`] struct implements `Stream` and must be polled regularly (or sent to a -//! background thread/task) in order for the telemetry to properly function. Dropping the object -//! will also deregister the global logger and replace it with a logger that discards messages. -//! The `Stream` generates [`TelemetryEvent`]s. -//! -//! > **Note**: Cloning the [`Telemetry`] and polling from multiple clones has an unspecified behaviour. -//! -//! # Example -//! -//! ```no_run -//! use futures::prelude::*; -//! -//! let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { -//! endpoints: sc_telemetry::TelemetryEndpoints::new(vec![ -//! // The `0` is the maximum verbosity level of messages to send to this endpoint. -//! ("wss://example.com".into(), 0) -//! ]).expect("Invalid URL or multiaddr provided"), -//! // Can be used to pass an external implementation of WebSockets. -//! wasm_external_transport: None, -//! }); -//! -//! // The `telemetry` object implements `Stream` and must be processed. -//! std::thread::spawn(move || { -//! futures::executor::block_on(telemetry.for_each(|_| future::ready(()))); -//! }); -//! -//! // Sends a message on the telemetry. -//! sc_telemetry::telemetry!(sc_telemetry::SUBSTRATE_INFO; "test"; -//! "foo" => "bar", -//! ) -//! ``` +//! If multiple substrate nodes are running in the same process, it uses a `tracing::Span` to +//! identify which substrate node is reporting the telemetry. Every task spawned using sc-service's +//! `TaskManager` automatically inherit this span. //! +//! Substrate's nodes initialize/register with the [`TelemetryWorker`] using a [`TelemetryHandle`]. +//! This handle can be cloned and passed around. It uses an asynchronous channel to communicate with +//! the running [`TelemetryWorker`] dedicated to registration. Registering can happen at any point +//! in time during the process execution. + +#![warn(missing_docs)] -use futures::{prelude::*, channel::mpsc}; -use libp2p::{Multiaddr, wasm_ext}; +use futures::{channel::mpsc, prelude::*}; +use libp2p::Multiaddr; use log::{error, warn}; -use parking_lot::Mutex; -use serde::{Serialize, Deserialize, Deserializer}; -use std::{pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration}; -use wasm_timer::Instant; +use serde::Serialize; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; +use std::collections::HashMap; +use tracing::Id; pub use libp2p::wasm_ext::ExtTransport; -pub use slog_scope::with_logger; -pub use slog; - -mod async_record; -mod worker; - -/// Configuration for telemetry. -pub struct TelemetryConfig { - /// Collection of telemetry WebSocket servers with a corresponding verbosity level. - pub endpoints: TelemetryEndpoints, - - /// Optional external implementation of a libp2p transport. 
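A rough end-to-end sketch of that registration flow from a host application's point of view; the node-specific values are made up, installing the layer on `tracing_subscriber`'s `Registry` is only one possible wiring, and error handling is kept minimal.

fn telemetry_example() -> std::io::Result<()> {
    use tracing_subscriber::prelude::*;

    // Create the layer/worker pair and install the layer on the tracing subscriber.
    let (layer, worker) = TelemetryLayer::new(None, None)?;
    tracing_subscriber::registry().with(layer).init();

    // Grab a handle before the worker is consumed, then drive the worker in the
    // background; a dedicated thread with `block_on` is only one way to do this.
    let mut handle = worker.handle();
    std::thread::spawn(move || futures::executor::block_on(worker.run()));

    // SUBSTRATE_INFO (0) is the maximum verbosity this endpoint is interested in.
    let endpoints = TelemetryEndpoints::new(vec![
        ("wss://telemetry.polkadot.io/submit/".into(), SUBSTRATE_INFO),
    ])
    .expect("hard-coded telemetry endpoint is valid; qed");

    // Sent (again) every time the connection to an endpoint is (re-)established.
    let connection_message = ConnectionMessage {
        name: "example-node".into(),
        implementation: "Substrate Node".into(),
        version: "2.0.0".into(),
        config: String::new(),
        chain: "flaming-fir".into(),
        genesis_hash: "0x00".into(),
        authority: false,
        startup_time: "1600000000000".into(),
        network_id: "example-network-id".into(),
    };

    // The span identifies this node; telemetry log lines only reach the worker
    // while it is entered.
    let telemetry_span = TelemetrySpan::new();
    let _notifier =
        handle.start_telemetry(telemetry_span.clone(), endpoints, connection_message);

    let _guard = telemetry_span.enter();
    telemetry!(SUBSTRATE_INFO; "example.message"; "foo" => "bar");
    Ok(())
}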
Used in WASM contexts where we need - /// some binding between the networking provided by the operating system or environment and - /// libp2p. - /// - /// This parameter exists whatever the target platform is, but it is expected to be set to - /// `Some` only when compiling for WASM. - /// - /// > **Important**: Each individual call to `write` corresponds to one message. There is no - /// > internal buffering going on. In the context of WebSockets, each `write` - /// > must be one individual WebSockets frame. - pub wasm_external_transport: Option, -} - -/// List of telemetry servers we want to talk to. Contains the URL of the server, and the -/// maximum verbosity level. -/// -/// The URL string can be either a URL or a multiaddress. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub struct TelemetryEndpoints( - #[serde(deserialize_with = "url_or_multiaddr_deser")] - Vec<(Multiaddr, u8)> -); - -/// Custom deserializer for TelemetryEndpoints, used to convert urls or multiaddr to multiaddr. -fn url_or_multiaddr_deser<'de, D>(deserializer: D) -> Result, D::Error> - where D: Deserializer<'de> -{ - Vec::<(String, u8)>::deserialize(deserializer)? - .iter() - .map(|e| Ok((url_to_multiaddr(&e.0) - .map_err(serde::de::Error::custom)?, e.1))) - .collect() -} - -impl TelemetryEndpoints { - pub fn new(endpoints: Vec<(String, u8)>) -> Result { - let endpoints: Result, libp2p::multiaddr::Error> = endpoints.iter() - .map(|e| Ok((url_to_multiaddr(&e.0)?, e.1))) - .collect(); - endpoints.map(Self) +pub use serde_json; +pub use tracing; + +mod endpoints; +mod layer; +mod node; +mod transport; + +pub use endpoints::*; +pub use layer::*; +use node::*; +use transport::*; + +/// Substrate DEBUG log level. +pub const SUBSTRATE_DEBUG: u8 = 9; +/// Substrate INFO log level. +pub const SUBSTRATE_INFO: u8 = 0; + +/// Consensus TRACE log level. +pub const CONSENSUS_TRACE: u8 = 9; +/// Consensus DEBUG log level. +pub const CONSENSUS_DEBUG: u8 = 5; +/// Consensus WARN log level. +pub const CONSENSUS_WARN: u8 = 4; +/// Consensus INFO log level. +pub const CONSENSUS_INFO: u8 = 1; + +pub(crate) type TelemetryMessage = (Id, u8, String); + +/// A handle representing a telemetry span, with the capability to enter the span if it exists. +#[derive(Debug, Clone)] +pub struct TelemetrySpan(tracing::Span); + +impl TelemetrySpan { + /// Enters this span, returning a guard that will exit the span when dropped. + pub fn enter(&self) -> tracing::span::Entered { + self.0.enter() } -} -impl TelemetryEndpoints { - /// Return `true` if there are no telemetry endpoints, `false` otherwise. - pub fn is_empty(&self) -> bool { - self.0.is_empty() + /// Constructs a new [`TelemetrySpan`]. + pub fn new() -> Self { + Self(tracing::error_span!(TELEMETRY_LOG_SPAN)) } -} -/// Parses a WebSocket URL into a libp2p `Multiaddr`. -fn url_to_multiaddr(url: &str) -> Result { - // First, assume that we have a `Multiaddr`. - let parse_error = match url.parse() { - Ok(ma) => return Ok(ma), - Err(err) => err, - }; - - // If not, try the `ws://path/url` format. - if let Ok(ma) = libp2p::multiaddr::from_url(url) { - return Ok(ma) + /// Return a clone of the underlying `tracing::Span` instance. + pub fn span(&self) -> tracing::Span { + self.0.clone() } - - // If we have no clue about the format of that string, assume that we were expecting a - // `Multiaddr`. - Err(parse_error) } -/// Log levels. 
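These constants are per-message verbosity levels: the worker only forwards a message to an endpoint if the message's verbosity is less than or equal to the maximum verbosity configured for that endpoint (see `process_message` further down). A small sketch, assuming the telemetry layer, worker and span are already set up:

fn verbosity_gating_example() -> Result<(), libp2p::multiaddr::Error> {
    // This endpoint only receives messages with verbosity <= CONSENSUS_INFO (1).
    let _endpoints = TelemetryEndpoints::new(vec![
        ("wss://telemetry.polkadot.io/submit/".into(), CONSENSUS_INFO),
    ])?;

    // Delivered to that endpoint: SUBSTRATE_INFO == 0 <= 1.
    telemetry!(SUBSTRATE_INFO; "example.message"; "foo" => "bar");
    // Dropped by the worker for that endpoint: CONSENSUS_DEBUG == 5 > 1.
    telemetry!(CONSENSUS_DEBUG; "example.message"; "foo" => "bar");
    Ok(())
}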
-pub const SUBSTRATE_DEBUG: &str = "9"; -pub const SUBSTRATE_INFO: &str = "0"; - -pub const CONSENSUS_TRACE: &str = "9"; -pub const CONSENSUS_DEBUG: &str = "5"; -pub const CONSENSUS_WARN: &str = "4"; -pub const CONSENSUS_INFO: &str = "1"; - -/// Telemetry object. Implements `Future` and must be polled regularly. -/// Contains an `Arc` and can be cloned and pass around. Only one clone needs to be polled -/// regularly and should be polled regularly. -/// Dropping all the clones unregisters the telemetry. -#[derive(Clone)] -pub struct Telemetry { - inner: Arc>, - /// Slog guard so that we don't get deregistered. - _guard: Arc, +/// Message sent when the connection (re-)establishes. +#[derive(Debug, Serialize)] +pub struct ConnectionMessage { + /// Node's name. + pub name: String, + /// Node's implementation. + pub implementation: String, + /// Node's version. + pub version: String, + /// Node's configuration. + pub config: String, + /// Node's chain. + pub chain: String, + /// Node's genesis hash. + pub genesis_hash: String, + /// Node is an authority. + pub authority: bool, + /// Node's startup time. + pub startup_time: String, + /// Node's network ID. + pub network_id: String, } -/// Behind the `Mutex` in `Telemetry`. +/// Telemetry worker. /// -/// Note that ideally we wouldn't have to make the `Telemetry` cloneable, as that would remove the -/// need for a `Mutex`. However there is currently a weird hack in place in `sc-service` -/// where we extract the telemetry registration so that it continues running during the shutdown -/// process. -struct TelemetryInner { - /// Worker for the telemetry. `None` if it failed to initialize. - worker: Option, - /// Receives log entries for them to be dispatched to the worker. - receiver: mpsc::Receiver, +/// It should run as a background task using the [`TelemetryWorker::run`] method. This method +/// will consume the object and any further attempts of initializing a new telemetry through its +/// handle will fail (without being fatal). +#[derive(Debug)] +pub struct TelemetryWorker { + message_receiver: mpsc::Receiver, + message_sender: mpsc::Sender, + register_receiver: mpsc::UnboundedReceiver, + register_sender: mpsc::UnboundedSender, + transport: WsTrans, } -/// Implements `slog::Drain`. -struct TelemetryDrain { - /// Sends log entries. - sender: std::panic::AssertUnwindSafe>, -} +impl TelemetryWorker { + pub(crate) fn new(buffer_size: usize, transport: WsTrans) -> Self { + let (message_sender, message_receiver) = mpsc::channel(buffer_size); + let (register_sender, register_receiver) = mpsc::unbounded(); + + Self { + message_receiver, + message_sender, + register_receiver, + register_sender, + transport, + } + } -/// Initializes the telemetry. See the crate root documentation for more information. -/// -/// Please be careful to not call this function twice in the same program. The `slog` crate -/// doesn't provide any way of knowing whether a global logger has already been registered. -pub fn init_telemetry(config: TelemetryConfig) -> Telemetry { - // Build the list of telemetry endpoints. 
- let (endpoints, wasm_external_transport) = (config.endpoints.0, config.wasm_external_transport); - - let (sender, receiver) = mpsc::channel(16); - let guard = { - let logger = TelemetryDrain { sender: std::panic::AssertUnwindSafe(sender) }; - let root = slog::Logger::root(slog::Drain::fuse(logger), slog::o!()); - slog_scope::set_global_logger(root) - }; - - let worker = match worker::TelemetryWorker::new(endpoints, wasm_external_transport) { - Ok(w) => Some(w), - Err(err) => { - error!(target: "telemetry", "Failed to initialize telemetry worker: {:?}", err); - None + /// Get a new [`TelemetryHandle`]. + /// + /// This is used when you want to register with the [`TelemetryWorker`]. + pub fn handle(&self) -> TelemetryHandle { + TelemetryHandle { + message_sender: self.register_sender.clone(), } - }; + } - Telemetry { - inner: Arc::new(Mutex::new(TelemetryInner { - worker, - receiver, - })), - _guard: Arc::new(guard), + /// Get a clone of the channel's `Sender` used to send telemetry events. + pub(crate) fn message_sender(&self) -> mpsc::Sender { + self.message_sender.clone() } -} -/// Event generated when polling the worker. -#[derive(Debug)] -pub enum TelemetryEvent { - /// We have established a connection to one of the telemetry endpoint, either for the first - /// time or after having been disconnected earlier. - Connected, -} + /// Run the telemetry worker. + /// + /// This should be run in a background task. + pub async fn run(self) { + let Self { + mut message_receiver, + message_sender: _, + mut register_receiver, + register_sender: _, + transport, + } = self; + + let mut node_map: HashMap> = HashMap::new(); + let mut node_pool: HashMap = HashMap::new(); -impl Stream for Telemetry { - type Item = TelemetryEvent; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let before = Instant::now(); - - // Because the `Telemetry` is cloneable, we need to put the actual fields behind a `Mutex`. - // However, the user is only ever supposed to poll from one instance of `Telemetry`, while - // the other instances are used only for RAII purposes. - // We assume that the user is following this advice and therefore that the `Mutex` is only - // ever locked once at a time. - let mut inner = match self.inner.try_lock() { - Some(l) => l, - None => { - warn!( - target: "telemetry", - "The telemetry seems to be polled multiple times simultaneously" - ); - // Returning `Pending` here means that we may never get polled again, but this is - // ok because we're in a situation where something else is actually currently doing - // the polling. - return Poll::Pending; + loop { + futures::select! { + message = message_receiver.next() => Self::process_message( + message, + &mut node_pool, + &node_map, + ).await, + init_payload = register_receiver.next() => Self::process_register( + init_payload, + &mut node_pool, + &mut node_map, + transport.clone(), + ).await, } - }; - - let mut has_connected = false; + } + } - // The polling pattern is: poll the worker so that it processes its queue, then add one - // message from the receiver (if possible), then poll the worker again, and so on. - loop { - if let Some(worker) = inner.worker.as_mut() { - while let Poll::Ready(event) = worker.poll(cx) { - // Right now we only have one possible event. This line is here in order to not - // forget to handle any possible new event type. 
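For reference, the registration handling below turns the `ConnectionMessage` into a `system.connected` payload that is replayed to an endpoint every time its connection is (re-)established. With made-up values, the resulting JSON is roughly:

fn connected_message_shape() {
    let _example = serde_json::json!({
        "id": 1, // span id of the registering node
        "payload": {
            "msg": "system.connected",
            "name": "example-node",
            "implementation": "Substrate Node",
            "version": "2.0.0",
            "config": "",
            "chain": "flaming-fir",
            "genesis_hash": "0x00",
            "authority": false,
            "startup_time": "1600000000000",
            "network_id": "example-network-id"
        }
    });
    // A "ts" timestamp is additionally inserted by the `Node` sink right before
    // the message goes out on a (re-)established connection.
}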
- let worker::TelemetryWorkerEvent::Connected = event; - has_connected = true; + async fn process_register( + input: Option, + node_pool: &mut HashMap>, + node_map: &mut HashMap>, + transport: WsTrans, + ) { + let input = input.expect("the stream is never closed; qed"); + + match input { + Register::Telemetry { + id, + endpoints, + connection_message, + } => { + let endpoints = endpoints.0; + + let connection_message = match serde_json::to_value(&connection_message) { + Ok(serde_json::Value::Object(mut value)) => { + value.insert("msg".into(), "system.connected".into()); + let mut obj = serde_json::Map::new(); + obj.insert("id".to_string(), id.into_u64().into()); + obj.insert("payload".to_string(), value.into()); + Some(obj) + } + Ok(_) => { + unreachable!("ConnectionMessage always serialize to an object; qed") + } + Err(err) => { + log::error!( + target: "telemetry", + "Could not serialize connection message: {}", + err, + ); + None + } + }; + + for (addr, verbosity) in endpoints { + log::trace!( + target: "telemetry", + "Initializing telemetry for: {:?}", + addr, + ); + node_map + .entry(id.clone()) + .or_default() + .push((verbosity, addr.clone())); + + let node = node_pool.entry(addr.clone()).or_insert_with(|| { + Node::new(transport.clone(), addr.clone(), Vec::new(), Vec::new()) + }); + + node.connection_messages.extend(connection_message.clone()); } } - - if let Poll::Ready(Some(log_entry)) = Stream::poll_next(Pin::new(&mut inner.receiver), cx) { - if let Some(worker) = inner.worker.as_mut() { - log_entry.as_record_values(|rec, val| { let _ = worker.log(rec, val); }); + Register::Notifier { + addresses, + connection_notifier, + } => { + for addr in addresses { + if let Some(node) = node_pool.get_mut(&addr) { + node.telemetry_connection_notifier + .push(connection_notifier.clone()); + } else { + log::error!( + target: "telemetry", + "Received connection notifier for unknown node ({}). This is a bug.", + addr, + ); + } } - } else { - break; } } + } - if before.elapsed() > Duration::from_millis(200) { - warn!(target: "telemetry", "Polling the telemetry took more than 200ms"); - } + // dispatch messages to the telemetry nodes + async fn process_message( + input: Option, + node_pool: &mut HashMap>, + node_map: &HashMap>, + ) { + let (id, verbosity, message) = input.expect("the stream is never closed; qed"); - if has_connected { - Poll::Ready(Some(TelemetryEvent::Connected)) + let nodes = if let Some(nodes) = node_map.get(&id) { + nodes } else { - Poll::Pending + // This is a normal error because the telemetry span is entered before the telemetry + // is initialized so it is possible that some messages in the beginning don't get + // through. + log::trace!( + target: "telemetry", + "Received telemetry log for unknown id ({:?}): {}", + id, + message, + ); + return; + }; + + for (node_max_verbosity, addr) in nodes { + if verbosity > *node_max_verbosity { + log::trace!( + target: "telemetry", + "Skipping {} for log entry with verbosity {:?}", + addr, + verbosity, + ); + continue; + } + + if let Some(node) = node_pool.get_mut(&addr) { + let _ = node.send(message.clone()).await; + } else { + log::error!( + target: "telemetry", + "Received message for unknown node ({}). This is a bug. 
\ + Message sent: {}", + addr, + message, + ); + } } } } -impl slog::Drain for TelemetryDrain { - type Ok = (); - type Err = (); - - fn log(&self, record: &slog::Record, values: &slog::OwnedKVList) -> Result { - let before = Instant::now(); +/// Handle to the [`TelemetryWorker`] thats allows initializing the telemetry for a Substrate node. +#[derive(Debug, Clone)] +pub struct TelemetryHandle { + message_sender: mpsc::UnboundedSender, +} - let serialized = async_record::AsyncRecord::from(record, values); - // Note: interestingly, `try_send` requires a `&mut` because it modifies some internal value, while `clone()` - // is lock-free. - if let Err(err) = self.sender.clone().try_send(serialized) { - warn!(target: "telemetry", "Ignored telemetry message because of error on channel: {:?}", err); - } +impl TelemetryHandle { + /// Initialize the telemetry with the endpoints provided in argument for the current substrate + /// node. + /// + /// This method must be called during the substrate node initialization. + /// + /// The `endpoints` argument is a collection of telemetry WebSocket servers with a corresponding + /// verbosity level. + /// + /// The `connection_message` argument is a JSON object that is sent every time the connection + /// (re-)establishes. + pub fn start_telemetry( + &mut self, + span: TelemetrySpan, + endpoints: TelemetryEndpoints, + connection_message: ConnectionMessage, + ) -> TelemetryConnectionNotifier { + let Self { message_sender } = self; + + let connection_notifier = TelemetryConnectionNotifier { + message_sender: message_sender.clone(), + addresses: endpoints.0.iter().map(|(addr, _)| addr.clone()).collect(), + }; - if before.elapsed() > Duration::from_millis(50) { - warn!(target: "telemetry", "Writing a telemetry log took more than 50ms"); + match span.0.id() { + Some(id) => { + match message_sender.unbounded_send(Register::Telemetry { + id, + endpoints, + connection_message, + }) { + Ok(()) => {} + Err(err) => error!( + target: "telemetry", + "Could not initialize telemetry: \ + the telemetry is probably already running: {}", + err, + ), + } + } + None => error!( + target: "telemetry", + "Could not initialize telemetry: the span could not be entered", + ), } - Ok(()) + connection_notifier } } -/// Translates to `slog_scope::info`, but contains an additional verbosity -/// parameter which the log record is tagged with. Additionally the verbosity -/// parameter is added to the record as a key-value pair. -#[macro_export] -macro_rules! telemetry { - ( $a:expr; $b:expr; $( $t:tt )* ) => { - $crate::with_logger(|l| { - $crate::slog::slog_info!(l, #$a, $b; $($t)* ) - }) - } +/// Used to create a stream of events with only one event: when a telemetry connection +/// (re-)establishes. +#[derive(Clone, Debug)] +pub struct TelemetryConnectionNotifier { + message_sender: mpsc::UnboundedSender, + addresses: Vec, } -#[cfg(test)] -mod telemetry_endpoints_tests { - use libp2p::Multiaddr; - use super::TelemetryEndpoints; - use super::url_to_multiaddr; - - #[test] - fn valid_endpoints() { - let endp = vec![("wss://telemetry.polkadot.io/submit/".into(), 3), ("/ip4/80.123.90.4/tcp/5432".into(), 4)]; - let telem = TelemetryEndpoints::new(endp.clone()).expect("Telemetry endpoint should be valid"); - let mut res: Vec<(Multiaddr, u8)> = vec![]; - for (a, b) in endp.iter() { - res.push((url_to_multiaddr(a).expect("provided url should be valid"), *b)) +impl TelemetryConnectionNotifier { + /// Get event stream for telemetry connection established events. 
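A short sketch of consuming that stream, assuming a `TelemetryConnectionNotifier` obtained from `TelemetryHandle::start_telemetry` and some async context to poll it in:

async fn watch_telemetry_connections(notifier: TelemetryConnectionNotifier) {
    use futures::StreamExt;

    let mut on_connect = notifier.on_connect_stream();
    // One `()` item is yielded every time a telemetry connection is (re-)established,
    // which is typically used to re-send node-specific state to the server.
    while let Some(()) = on_connect.next().await {
        log::info!("telemetry connection (re-)established");
    }
}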
+ /// + /// This function will return an error if the telemetry has already been started by + /// [`TelemetryHandle::start_telemetry`]. + pub fn on_connect_stream(&self) -> TracingUnboundedReceiver<()> { + let (message_sender, message_receiver) = tracing_unbounded("mpsc_telemetry_on_connect"); + if let Err(err) = self.message_sender.unbounded_send(Register::Notifier { + addresses: self.addresses.clone(), + connection_notifier: message_sender, + }) { + error!( + target: "telemetry", + "Could not create a telemetry connection notifier: \ + the telemetry is probably already running: {}", + err, + ); } - assert_eq!(telem.0, res); + message_receiver } +} - #[test] - fn invalid_endpoints() { - let endp = vec![("/ip4/...80.123.90.4/tcp/5432".into(), 3), ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4)]; - let telem = TelemetryEndpoints::new(endp); - assert!(telem.is_err()); - } +#[derive(Debug)] +enum Register { + Telemetry { + id: Id, + endpoints: TelemetryEndpoints, + connection_message: ConnectionMessage, + }, + Notifier { + addresses: Vec, + connection_notifier: ConnectionNotifierSender, + }, +} - #[test] - fn valid_and_invalid_endpoints() { - let endp = vec![("/ip4/80.123.90.4/tcp/5432".into(), 3), ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4)]; - let telem = TelemetryEndpoints::new(endp); - assert!(telem.is_err()); - } +/// Report a telemetry. +/// +/// Translates to [`tracing::info`], but contains an additional verbosity parameter which the log +/// record is tagged with. Additionally the verbosity parameter is added to the record as a +/// key-value pair. +/// +/// # Example +/// +/// ```no_run +/// # use sc_telemetry::*; +/// # let authority_id = 42_u64; +/// # let set_id = (43_u64, 44_u64); +/// # let authorities = vec![45_u64]; +/// telemetry!(CONSENSUS_INFO; "afg.authority_set"; +/// "authority_id" => authority_id.to_string(), +/// "authority_set_id" => ?set_id, +/// "authorities" => authorities, +/// ); +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! telemetry { + ( $verbosity:expr; $msg:expr; $( $t:tt )* ) => {{ + let verbosity: u8 = $verbosity; + match format_fields_to_json!($($t)*) { + Err(err) => { + $crate::tracing::error!( + target: "telemetry", + "Could not serialize value for telemetry: {}", + err, + ); + }, + Ok(mut json) => { + // NOTE: the span id will be added later in the JSON for the greater good + json.insert("msg".into(), $msg.into()); + let serialized_json = $crate::serde_json::to_string(&json) + .expect("contains only string keys; qed"); + $crate::tracing::info!(target: $crate::TELEMETRY_LOG_SPAN, + verbosity, + json = serialized_json.as_str(), + ); + }, + } + }}; +} + +#[macro_export(local_inner_macros)] +#[doc(hidden)] +macro_rules! format_fields_to_json { + ( $k:literal => $v:expr $(,)? $(, $($t:tt)+ )? ) => {{ + $crate::serde_json::to_value(&$v) + .map(|value| { + let mut map = $crate::serde_json::Map::new(); + map.insert($k.into(), value); + map + }) + $( + .and_then(|mut prev_map| { + format_fields_to_json!($($t)*) + .map(move |mut other_map| { + prev_map.append(&mut other_map); + prev_map + }) + }) + )* + }}; + ( $k:literal => ? $v:expr $(,)? $(, $($t:tt)+ )? 
) => {{ + let mut map = $crate::serde_json::Map::new(); + map.insert($k.into(), std::format!("{:?}", &$v).into()); + $crate::serde_json::Result::Ok(map) + $( + .and_then(|mut prev_map| { + format_fields_to_json!($($t)*) + .map(move |mut other_map| { + prev_map.append(&mut other_map); + prev_map + }) + }) + )* + }}; } diff --git a/client/telemetry/src/node.rs b/client/telemetry/src/node.rs new file mode 100644 index 0000000000000..e47bc2f9634f7 --- /dev/null +++ b/client/telemetry/src/node.rs @@ -0,0 +1,286 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use futures::prelude::*; +use libp2p::core::transport::Transport; +use libp2p::Multiaddr; +use rand::Rng as _; +use std::{fmt, mem, pin::Pin, task::Context, task::Poll, time::Duration}; +use wasm_timer::Delay; + +pub(crate) type ConnectionNotifierSender = sp_utils::mpsc::TracingUnboundedSender<()>; + +/// Handler for a single telemetry node. +/// +/// This is a wrapper `Sink` around a network `Sink` with 3 particularities: +/// - It is infallible: if the connection stops, it will reconnect automatically when the server +/// becomes available again. +/// - It holds a list of "connection messages" which are sent automatically when the connection is +/// (re-)established. This is used for the "system.connected" message that needs to be send for +/// every substrate node that connects. +/// - It doesn't stay in pending while waiting for connection. Instead, it moves data into the +/// void if the connection could not be established. This is important for the `Dispatcher` +/// `Sink` which we don't want to block if one connection is broken. +#[derive(Debug)] +pub(crate) struct Node { + /// Address of the node. + addr: Multiaddr, + /// State of the connection. + socket: NodeSocket, + /// Transport used to establish new connections. + transport: TTrans, + /// Messages that are sent when the connection (re-)establishes. + pub(crate) connection_messages: Vec>, + /// Notifier for when the connection (re-)establishes. + pub(crate) telemetry_connection_notifier: Vec, +} + +enum NodeSocket { + /// We're connected to the node. This is the normal state. + Connected(NodeSocketConnected), + /// We are currently dialing the node. + Dialing(TTrans::Dial), + /// A new connection should be started as soon as possible. + ReconnectNow, + /// Waiting before attempting to dial again. + WaitingReconnect(Delay), + /// Temporary transition state. + Poisoned, +} + +impl NodeSocket { + fn wait_reconnect() -> NodeSocket { + let random_delay = rand::thread_rng().gen_range(5, 10); + let delay = Delay::new(Duration::from_secs(random_delay)); + NodeSocket::WaitingReconnect(delay) + } +} + +struct NodeSocketConnected { + /// Where to send data. + sink: TTrans::Output, + /// Queue of packets to send before accepting new packets. 
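One detail of the reconnection strategy listed above is worth spelling out: the retry delay is randomized so that, when a telemetry server comes back up, nodes do not all reconnect in lock-step. In isolation (using the same imports as this file), the delay amounts to:

fn reconnect_delay() -> Delay {
    // Between 5 and 10 seconds before the next dialing attempt.
    Delay::new(Duration::from_secs(rand::thread_rng().gen_range(5, 10)))
}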
+ buf: Vec>, +} + +impl Node { + /// Builds a new node handler. + pub(crate) fn new( + transport: TTrans, + addr: Multiaddr, + connection_messages: Vec>, + telemetry_connection_notifier: Vec, + ) -> Self { + Node { + addr, + socket: NodeSocket::ReconnectNow, + transport, + connection_messages, + telemetry_connection_notifier, + } + } +} + +impl Node +where + TTrans: Clone + Unpin, + TTrans::Dial: Unpin, + TTrans::Output: + Sink, Error = TSinkErr> + Stream, TSinkErr>> + Unpin, + TSinkErr: fmt::Debug, +{ + // NOTE: this code has been inspired from `Buffer` (`futures_util::sink::Buffer`). + // https://docs.rs/futures-util/0.3.8/src/futures_util/sink/buffer.rs.html#32 + fn try_send_connection_messages( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + conn: &mut NodeSocketConnected, + ) -> Poll> { + while let Some(item) = conn.buf.pop() { + if let Err(e) = conn.sink.start_send_unpin(item) { + return Poll::Ready(Err(e)); + } + futures::ready!(conn.sink.poll_ready_unpin(cx))?; + } + Poll::Ready(Ok(())) + } +} + +pub(crate) enum Infallible {} + +impl Sink for Node +where + TTrans: Clone + Unpin, + TTrans::Dial: Unpin, + TTrans::Output: + Sink, Error = TSinkErr> + Stream, TSinkErr>> + Unpin, + TSinkErr: fmt::Debug, +{ + type Error = Infallible; + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut socket = mem::replace(&mut self.socket, NodeSocket::Poisoned); + self.socket = loop { + match socket { + NodeSocket::Connected(mut conn) => match conn.sink.poll_ready_unpin(cx) { + Poll::Ready(Ok(())) => { + match self.as_mut().try_send_connection_messages(cx, &mut conn) { + Poll::Ready(Err(err)) => { + log::warn!(target: "telemetry", "⚠️ Disconnected from {}: {:?}", self.addr, err); + socket = NodeSocket::wait_reconnect(); + } + Poll::Ready(Ok(())) => { + self.socket = NodeSocket::Connected(conn); + return Poll::Ready(Ok(())); + } + Poll::Pending => { + self.socket = NodeSocket::Connected(conn); + return Poll::Pending; + } + } + } + Poll::Ready(Err(err)) => { + log::warn!(target: "telemetry", "⚠️ Disconnected from {}: {:?}", self.addr, err); + socket = NodeSocket::wait_reconnect(); + } + Poll::Pending => { + self.socket = NodeSocket::Connected(conn); + return Poll::Pending; + } + }, + NodeSocket::Dialing(mut s) => match Future::poll(Pin::new(&mut s), cx) { + Poll::Ready(Ok(sink)) => { + log::debug!(target: "telemetry", "✅ Connected to {}", self.addr); + + for sender in self.telemetry_connection_notifier.iter_mut() { + let _ = sender.send(()); + } + + let buf = self + .connection_messages + .iter() + .map(|json| { + let mut json = json.clone(); + json.insert( + "ts".to_string(), + chrono::Local::now().to_rfc3339().into(), + ); + json + }) + .filter_map(|json| match serde_json::to_vec(&json) { + Ok(message) => Some(message), + Err(err) => { + log::error!( + target: "telemetry", + "An error occurred while generating new connection \ + messages: {}", + err, + ); + None + } + }) + .collect(); + + socket = NodeSocket::Connected(NodeSocketConnected { sink, buf }); + } + Poll::Pending => break NodeSocket::Dialing(s), + Poll::Ready(Err(err)) => { + log::warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); + socket = NodeSocket::wait_reconnect(); + } + }, + NodeSocket::ReconnectNow => match self.transport.clone().dial(self.addr.clone()) { + Ok(d) => { + log::debug!(target: "telemetry", "Started dialing {}", self.addr); + socket = NodeSocket::Dialing(d); + } + Err(err) => { + log::warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); + 
socket = NodeSocket::wait_reconnect(); + } + }, + NodeSocket::WaitingReconnect(mut s) => { + if let Poll::Ready(_) = Future::poll(Pin::new(&mut s), cx) { + socket = NodeSocket::ReconnectNow; + } else { + break NodeSocket::WaitingReconnect(s); + } + } + NodeSocket::Poisoned => { + log::error!(target: "telemetry", "‼️ Poisoned connection with {}", self.addr); + break NodeSocket::Poisoned; + } + } + }; + + // The Dispatcher blocks when the Node sinks blocks. This is why it is important that the + // Node sinks doesn't go into "Pending" state while waiting for reconnection but rather + // discard the excess of telemetry messages. + Poll::Ready(Ok(())) + } + + fn start_send(mut self: Pin<&mut Self>, item: String) -> Result<(), Self::Error> { + match &mut self.socket { + NodeSocket::Connected(conn) => { + let _ = conn.sink.start_send_unpin(item.into()).expect("boo"); + } + _socket => { + log::trace!( + target: "telemetry", + "Message has been discarded: {}", + item, + ); + } + } + Ok(()) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match &mut self.socket { + NodeSocket::Connected(conn) => match conn.sink.poll_flush_unpin(cx) { + Poll::Ready(Err(_)) => { + self.socket = NodeSocket::wait_reconnect(); + Poll::Ready(Ok(())) + } + Poll::Ready(Ok(())) => Poll::Ready(Ok(())), + Poll::Pending => Poll::Pending, + }, + _ => Poll::Ready(Ok(())), + } + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match &mut self.socket { + NodeSocket::Connected(conn) => conn.sink.poll_close_unpin(cx).map(|_| Ok(())), + _ => Poll::Ready(Ok(())), + } + } +} + +impl fmt::Debug for NodeSocket { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use NodeSocket::*; + f.write_str(match self { + Connected(_) => "Connected", + Dialing(_) => "Dialing", + ReconnectNow => "ReconnectNow", + WaitingReconnect(_) => "WaitingReconnect", + Poisoned => "Poisoned", + }) + } +} diff --git a/client/telemetry/src/transport.rs b/client/telemetry/src/transport.rs new file mode 100644 index 0000000000000..e32a29d9a950d --- /dev/null +++ b/client/telemetry/src/transport.rs @@ -0,0 +1,163 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use futures::{ + prelude::*, + ready, + task::{Context, Poll}, +}; +use libp2p::{ + core::transport::{timeout::TransportTimeout, OptionalTransport}, + wasm_ext, Transport, +}; +use std::io; +use std::pin::Pin; +use std::time::Duration; + +/// Timeout after which a connection attempt is considered failed. Includes the WebSocket HTTP +/// upgrading. 
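For completeness, the caller-facing side of this transport selection: the `Option<ExtTransport>` passed to `TelemetryLayer::new` decides whether an externally provided (WASM) transport or the native fallback built below is used. A hedged sketch:

fn build_telemetry(
    wasm_transport: Option<ExtTransport>,
) -> std::io::Result<(TelemetryLayer, TelemetryWorker)> {
    // Native builds pass `None` and end up on the TCP + WebSocket + DNS fallback;
    // in a browser the caller passes the environment's WebSocket implementation,
    // e.g. something like `Some(ExtTransport::new(ffi::websocket_transport()))`
    // (libp2p's `wasm-ext` bindings, mentioned here only as an assumption).
    TelemetryLayer::new(None, wasm_transport)
}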
+const CONNECT_TIMEOUT: Duration = Duration::from_secs(20); + +pub(crate) fn initialize_transport( + wasm_external_transport: Option, +) -> Result { + let transport = match wasm_external_transport.clone() { + Some(t) => OptionalTransport::some(t), + None => OptionalTransport::none(), + } + .map((|inner, _| StreamSink::from(inner)) as fn(_, _) -> _); + + // The main transport is the `wasm_external_transport`, but if we're on desktop we add + // support for TCP+WebSocket+DNS as a fallback. In practice, you're not expected to pass + // an external transport on desktop and the fallback is used all the time. + #[cfg(not(target_os = "unknown"))] + let transport = transport.or_transport({ + let inner = libp2p::dns::DnsConfig::new(libp2p::tcp::TcpConfig::new())?; + libp2p::websocket::framed::WsConfig::new(inner).and_then(|connec, _| { + let connec = connec + .with(|item| { + let item = libp2p::websocket::framed::OutgoingData::Binary(item); + future::ready(Ok::<_, io::Error>(item)) + }) + .try_filter(|item| future::ready(item.is_data())) + .map_ok(|data| data.into_bytes()); + future::ready(Ok::<_, io::Error>(connec)) + }) + }); + + Ok(TransportTimeout::new( + transport.map(|out, _| { + let out = out + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) + .sink_map_err(|err| io::Error::new(io::ErrorKind::Other, err)); + Box::pin(out) as Pin> + }), + CONNECT_TIMEOUT, + ) + .boxed()) +} + +/// A trait that implements `Stream` and `Sink`. +pub(crate) trait StreamAndSink: Stream + Sink {} +impl, I> StreamAndSink for T {} + +/// A type alias for the WebSocket transport. +pub(crate) type WsTrans = libp2p::core::transport::Boxed< + Pin< + Box< + dyn StreamAndSink, Item = Result, io::Error>, Error = io::Error> + Send, + >, + >, +>; + +/// Wraps around an `AsyncWrite` and implements `Sink`. Guarantees that each item being sent maps +/// to one call of `write`. +/// +/// For some context, we put this object around the `wasm_ext::ExtTransport` in order to make sure +/// that each telemetry message maps to one single call to `write` in the WASM FFI. +#[pin_project::pin_project] +pub(crate) struct StreamSink(#[pin] T, Option>); + +impl From for StreamSink { + fn from(inner: T) -> StreamSink { + StreamSink(inner, None) + } +} + +impl Stream for StreamSink { + type Item = Result, io::Error>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let this = self.project(); + let mut buf = vec![0; 128]; + match ready!(AsyncRead::poll_read(this.0, cx, &mut buf)) { + Ok(0) => Poll::Ready(None), + Ok(n) => { + buf.truncate(n); + Poll::Ready(Some(Ok(buf))) + } + Err(err) => Poll::Ready(Some(Err(err))), + } + } +} + +impl StreamSink { + fn poll_flush_buffer(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let this = self.project(); + + if let Some(buffer) = this.1 { + if ready!(this.0.poll_write(cx, &buffer[..]))? 
!= buffer.len() { + log::error!(target: "telemetry", + "Detected some internal buffering happening in the telemetry"); + let err = io::Error::new(io::ErrorKind::Other, "Internal buffering detected"); + return Poll::Ready(Err(err)); + } + } + + *this.1 = None; + Poll::Ready(Ok(())) + } +} + +impl Sink> for StreamSink { + type Error = io::Error; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(StreamSink::poll_flush_buffer(self, cx))?; + Poll::Ready(Ok(())) + } + + fn start_send(self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { + let this = self.project(); + debug_assert!(this.1.is_none()); + *this.1 = Some(item); + Ok(()) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(self.as_mut().poll_flush_buffer(cx))?; + let this = self.project(); + AsyncWrite::poll_flush(this.0, cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(self.as_mut().poll_flush_buffer(cx))?; + let this = self.project(); + AsyncWrite::poll_close(this.0, cx) + } +} diff --git a/client/telemetry/src/worker.rs b/client/telemetry/src/worker.rs deleted file mode 100644 index a01ab89e7dde7..0000000000000 --- a/client/telemetry/src/worker.rs +++ /dev/null @@ -1,263 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Contains the object that makes the telemetry work. -//! -//! # Usage -//! -//! - Create a `TelemetryWorker` with `TelemetryWorker::new`. -//! - Send messages to the telemetry with `TelemetryWorker::send_message`. Messages will only be -//! sent to the appropriate targets. Messages may be ignored if the target happens to be -//! temporarily unreachable. -//! - You must appropriately poll the worker with `TelemetryWorker::poll`. Polling will/may produce -//! events indicating what happened since the latest polling. -//! - -use futures::{prelude::*, ready}; -use libp2p::{ - core::transport::{OptionalTransport, timeout::TransportTimeout}, - Multiaddr, - Transport, - wasm_ext -}; -use log::{trace, warn, error}; -use slog::Drain; -use std::{io, pin::Pin, task::Context, task::Poll, time}; - -mod node; - -/// Timeout after which a connection attempt is considered failed. Includes the WebSocket HTTP -/// upgrading. -const CONNECT_TIMEOUT: time::Duration = time::Duration::from_secs(20); - -/// Event generated when polling the worker. -#[derive(Debug)] -pub enum TelemetryWorkerEvent { - /// We have established a connection to one of the telemetry endpoint, either for the first - /// time or after having been disconnected earlier. - Connected, -} - -/// Telemetry processing machine. -#[derive(Debug)] -pub struct TelemetryWorker { - /// List of nodes with their maximum verbosity level. 
- nodes: Vec<(node::Node, u8)>, -} - -trait StreamAndSink: Stream + Sink {} -impl, I> StreamAndSink for T {} - -type WsTrans = libp2p::core::transport::Boxed< - Pin, - Item = Result, io::Error>, - Error = io::Error - > + Send>> ->; - -impl TelemetryWorker { - /// Builds a new `TelemetryWorker`. - /// - /// The endpoints must be a list of targets, plus a verbosity level. When you send a message - /// to the telemetry, only the targets whose verbosity is higher than the verbosity of the - /// message will receive it. - pub fn new( - endpoints: impl IntoIterator, - wasm_external_transport: impl Into> - ) -> Result { - let transport = match wasm_external_transport.into() { - Some(t) => OptionalTransport::some(t), - None => OptionalTransport::none() - }.map((|inner, _| StreamSink::from(inner)) as fn(_, _) -> _); - - // The main transport is the `wasm_external_transport`, but if we're on desktop we add - // support for TCP+WebSocket+DNS as a fallback. In practice, you're not expected to pass - // an external transport on desktop and the fallback is used all the time. - #[cfg(not(target_os = "unknown"))] - let transport = transport.or_transport({ - let inner = libp2p::dns::DnsConfig::new(libp2p::tcp::TcpConfig::new())?; - libp2p::websocket::framed::WsConfig::new(inner) - .and_then(|connec, _| { - let connec = connec - .with(|item| { - let item = libp2p::websocket::framed::OutgoingData::Binary(item); - future::ready(Ok::<_, io::Error>(item)) - }) - .try_filter(|item| future::ready(item.is_data())) - .map_ok(|data| data.into_bytes()); - future::ready(Ok::<_, io::Error>(connec)) - }) - }); - - let transport = TransportTimeout::new( - transport.map(|out, _| { - let out = out - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) - .sink_map_err(|err| io::Error::new(io::ErrorKind::Other, err)); - Box::pin(out) as Pin> - }), - CONNECT_TIMEOUT - ).boxed(); - - Ok(TelemetryWorker { - nodes: endpoints.into_iter().map(|(addr, verbosity)| { - let node = node::Node::new(transport.clone(), addr); - (node, verbosity) - }).collect() - }) - } - - /// Polls the worker for events that happened. - pub fn poll(&mut self, cx: &mut Context) -> Poll { - for (node, _) in &mut self.nodes { - loop { - match node::Node::poll(Pin::new(node), cx) { - Poll::Ready(node::NodeEvent::Connected) => - return Poll::Ready(TelemetryWorkerEvent::Connected), - Poll::Ready(node::NodeEvent::Disconnected(_)) => continue, - Poll::Pending => break, - } - } - } - - Poll::Pending - } - - /// Equivalent to `slog::Drain::log`, but takes `self` by `&mut` instead, which is more convenient. - /// - /// Keep in mind that you should call `TelemetryWorker::poll` in order to process the messages. - /// You should call this function right after calling `slog::Drain::log`. - pub fn log(&mut self, record: &slog::Record, values: &slog::OwnedKVList) -> Result<(), ()> { - let msg_verbosity = match record.tag().parse::() { - Ok(v) => v, - Err(err) => { - warn!(target: "telemetry", "Failed to parse telemetry tag {:?}: {:?}", - record.tag(), err); - return Err(()) - } - }; - - // None of the nodes want that verbosity, so just return without doing any serialization. - if self.nodes.iter().all(|(_, node_max_verbosity)| msg_verbosity > *node_max_verbosity) { - trace!( - target: "telemetry", - "Skipping log entry because verbosity {:?} is too high for all endpoints", - msg_verbosity - ); - return Ok(()) - } - - // Turn the message into JSON. 
- let serialized = { - let mut out = Vec::new(); - slog_json::Json::default(&mut out).log(record, values).map_err(|_| ())?; - out - }; - - for (node, node_max_verbosity) in &mut self.nodes { - if msg_verbosity > *node_max_verbosity { - trace!(target: "telemetry", "Skipping {:?} for log entry with verbosity {:?}", - node.addr(), msg_verbosity); - continue; - } - - // `send_message` returns an error if we're not connected, which we silently ignore. - let _ = node.send_message(&serialized.clone()[..]); - } - - Ok(()) - } -} - -/// Wraps around an `AsyncWrite` and implements `Sink`. Guarantees that each item being sent maps -/// to one call of `write`. -/// -/// For some context, we put this object around the `wasm_ext::ExtTransport` in order to make sure -/// that each telemetry message maps to one single call to `write` in the WASM FFI. -#[pin_project::pin_project] -struct StreamSink(#[pin] T, Option>); - -impl From for StreamSink { - fn from(inner: T) -> StreamSink { - StreamSink(inner, None) - } -} - -impl Stream for StreamSink { - type Item = Result, io::Error>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let this = self.project(); - let mut buf = vec![0; 128]; - match ready!(AsyncRead::poll_read(this.0, cx, &mut buf)) { - Ok(0) => Poll::Ready(None), - Ok(n) => { - buf.truncate(n); - Poll::Ready(Some(Ok(buf))) - }, - Err(err) => Poll::Ready(Some(Err(err))), - } - } -} - -impl StreamSink { - fn poll_flush_buffer(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let this = self.project(); - - if let Some(buffer) = this.1 { - if ready!(this.0.poll_write(cx, &buffer[..]))? != buffer.len() { - error!(target: "telemetry", - "Detected some internal buffering happening in the telemetry"); - let err = io::Error::new(io::ErrorKind::Other, "Internal buffering detected"); - return Poll::Ready(Err(err)); - } - } - - *this.1 = None; - Poll::Ready(Ok(())) - } -} - -impl Sink> for StreamSink { - type Error = io::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - ready!(StreamSink::poll_flush_buffer(self, cx))?; - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { - let this = self.project(); - debug_assert!(this.1.is_none()); - *this.1 = Some(item); - Ok(()) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - ready!(self.as_mut().poll_flush_buffer(cx))?; - let this = self.project(); - AsyncWrite::poll_flush(this.0, cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - ready!(self.as_mut().poll_flush_buffer(cx))?; - let this = self.project(); - AsyncWrite::poll_close(this.0, cx) - } -} diff --git a/client/telemetry/src/worker/node.rs b/client/telemetry/src/worker/node.rs deleted file mode 100644 index eef7ca7e81553..0000000000000 --- a/client/telemetry/src/worker/node.rs +++ /dev/null @@ -1,305 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Contains the `Node` struct, which handles communications with a single telemetry endpoint. - -use futures::prelude::*; -use futures_timer::Delay; -use libp2p::Multiaddr; -use libp2p::core::transport::Transport; -use log::{trace, debug, warn, error}; -use rand::Rng as _; -use std::{collections::VecDeque, fmt, mem, pin::Pin, task::Context, task::Poll, time::Duration}; - -/// Maximum number of pending telemetry messages. -const MAX_PENDING: usize = 10; - -/// Handler for a single telemetry node. -pub struct Node { - /// Address of the node. - addr: Multiaddr, - /// State of the connection. - socket: NodeSocket, - /// Transport used to establish new connections. - transport: TTrans, -} - -enum NodeSocket { - /// We're connected to the node. This is the normal state. - Connected(NodeSocketConnected), - /// We are currently dialing the node. - Dialing(TTrans::Dial), - /// A new connection should be started as soon as possible. - ReconnectNow, - /// Waiting before attempting to dial again. - WaitingReconnect(Delay), - /// Temporary transition state. - Poisoned, -} - -struct NodeSocketConnected { - /// Where to send data. - sink: TTrans::Output, - /// Queue of packets to send. - pending: VecDeque>, - /// If true, we need to flush the sink. - need_flush: bool, - /// A timeout for the socket to write data. - timeout: Option, -} - -/// Event that can happen with this node. -#[derive(Debug)] -pub enum NodeEvent { - /// We are now connected to this node. - Connected, - /// We are now disconnected from this node. - Disconnected(ConnectionError), -} - -/// Reason for disconnecting from a node. -#[derive(Debug)] -pub enum ConnectionError { - /// The connection timed-out. - Timeout, - /// Reading from the socket returned and end-of-file, indicating that the socket has been - /// closed. - Closed, - /// The sink errored. - Sink(TSinkErr), -} - -impl Node { - /// Builds a new node handler. - pub fn new(transport: TTrans, addr: Multiaddr) -> Self { - Node { - addr, - socket: NodeSocket::ReconnectNow, - transport, - } - } - - /// Returns the address that was passed to `new`. - pub fn addr(&self) -> &Multiaddr { - &self.addr - } -} - -impl Node -where TTrans: Clone + Unpin, TTrans::Dial: Unpin, - TTrans::Output: Sink, Error = TSinkErr> - + Stream, TSinkErr>> - + Unpin, - TSinkErr: fmt::Debug -{ - /// Sends a WebSocket frame to the node. Returns an error if we are not connected to the node. - /// - /// After calling this method, you should call `poll` in order for it to be properly processed. - pub fn send_message(&mut self, payload: impl Into>) -> Result<(), ()> { - if let NodeSocket::Connected(NodeSocketConnected { pending, .. }) = &mut self.socket { - if pending.len() <= MAX_PENDING { - trace!(target: "telemetry", "Adding log entry to queue for {:?}", self.addr); - pending.push_back(payload.into()); - Ok(()) - } else { - warn!(target: "telemetry", "⚠️ Rejected log entry because queue is full for {:?}", - self.addr); - Err(()) - } - } else { - Err(()) - } - } - - /// Polls the node for updates. Must be performed regularly. 
- pub fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let mut socket = mem::replace(&mut self.socket, NodeSocket::Poisoned); - self.socket = loop { - match socket { - NodeSocket::Connected(mut conn) => { - match NodeSocketConnected::poll(Pin::new(&mut conn), cx, &self.addr) { - Poll::Ready(Ok(v)) => match v {}, - Poll::Pending => { - break NodeSocket::Connected(conn) - }, - Poll::Ready(Err(err)) => { - warn!(target: "telemetry", "⚠️ Disconnected from {}: {:?}", self.addr, err); - let timeout = gen_rand_reconnect_delay(); - self.socket = NodeSocket::WaitingReconnect(timeout); - return Poll::Ready(NodeEvent::Disconnected(err)) - } - } - } - NodeSocket::Dialing(mut s) => match Future::poll(Pin::new(&mut s), cx) { - Poll::Ready(Ok(sink)) => { - debug!(target: "telemetry", "✅ Connected to {}", self.addr); - let conn = NodeSocketConnected { - sink, - pending: VecDeque::new(), - need_flush: false, - timeout: None, - }; - self.socket = NodeSocket::Connected(conn); - return Poll::Ready(NodeEvent::Connected) - }, - Poll::Pending => break NodeSocket::Dialing(s), - Poll::Ready(Err(err)) => { - warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); - let timeout = gen_rand_reconnect_delay(); - socket = NodeSocket::WaitingReconnect(timeout); - } - } - NodeSocket::ReconnectNow => match self.transport.clone().dial(self.addr.clone()) { - Ok(d) => { - debug!(target: "telemetry", "Started dialing {}", self.addr); - socket = NodeSocket::Dialing(d); - } - Err(err) => { - warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); - let timeout = gen_rand_reconnect_delay(); - socket = NodeSocket::WaitingReconnect(timeout); - } - } - NodeSocket::WaitingReconnect(mut s) => - if let Poll::Ready(_) = Future::poll(Pin::new(&mut s), cx) { - socket = NodeSocket::ReconnectNow; - } else { - break NodeSocket::WaitingReconnect(s) - } - NodeSocket::Poisoned => { - error!(target: "telemetry", "‼️ Poisoned connection with {}", self.addr); - break NodeSocket::Poisoned - } - } - }; - - Poll::Pending - } -} - -/// Generates a `Delay` object with a random timeout. -/// -/// If there are general connection issues, not all endpoints should be synchronized in their -/// re-connection time. -fn gen_rand_reconnect_delay() -> Delay { - let random_delay = rand::thread_rng().gen_range(5, 10); - Delay::new(Duration::from_secs(random_delay)) -} - -impl NodeSocketConnected -where TTrans::Output: Sink, Error = TSinkErr> - + Stream, TSinkErr>> - + Unpin -{ - /// Processes the queue of messages for the connected socket. - /// - /// The address is passed for logging purposes only. 
- fn poll( - mut self: Pin<&mut Self>, - cx: &mut Context, - my_addr: &Multiaddr, - ) -> Poll>> { - - while let Some(item) = self.pending.pop_front() { - if let Poll::Ready(result) = Sink::poll_ready(Pin::new(&mut self.sink), cx) { - if let Err(err) = result { - return Poll::Ready(Err(ConnectionError::Sink(err))) - } - - let item_len = item.len(); - if let Err(err) = Sink::start_send(Pin::new(&mut self.sink), item) { - return Poll::Ready(Err(ConnectionError::Sink(err))) - } - trace!( - target: "telemetry", "Successfully sent {:?} bytes message to {}", - item_len, my_addr - ); - self.need_flush = true; - - } else { - self.pending.push_front(item); - if self.timeout.is_none() { - self.timeout = Some(Delay::new(Duration::from_secs(10))); - } - break; - } - } - - if self.need_flush { - match Sink::poll_flush(Pin::new(&mut self.sink), cx) { - Poll::Pending => { - if self.timeout.is_none() { - self.timeout = Some(Delay::new(Duration::from_secs(10))); - } - }, - Poll::Ready(Err(err)) => { - self.timeout = None; - return Poll::Ready(Err(ConnectionError::Sink(err))) - }, - Poll::Ready(Ok(())) => { - self.timeout = None; - self.need_flush = false; - }, - } - } - - if let Some(timeout) = self.timeout.as_mut() { - match Future::poll(Pin::new(timeout), cx) { - Poll::Pending => {}, - Poll::Ready(()) => { - self.timeout = None; - return Poll::Ready(Err(ConnectionError::Timeout)) - } - } - } - - match Stream::poll_next(Pin::new(&mut self.sink), cx) { - Poll::Ready(Some(Ok(_))) => { - // We poll the telemetry `Stream` because the underlying implementation relies on - // this in order to answer PINGs. - // We don't do anything with incoming messages, however. - }, - Poll::Ready(Some(Err(err))) => { - return Poll::Ready(Err(ConnectionError::Sink(err))) - }, - Poll::Ready(None) => { - return Poll::Ready(Err(ConnectionError::Closed)) - }, - Poll::Pending => {}, - } - - Poll::Pending - } -} - -impl fmt::Debug for Node { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let state = match self.socket { - NodeSocket::Connected(_) => "Connected", - NodeSocket::Dialing(_) => "Dialing", - NodeSocket::ReconnectNow => "Pending reconnect", - NodeSocket::WaitingReconnect(_) => "Pending reconnect", - NodeSocket::Poisoned => "Poisoned", - }; - - f.debug_struct("Node") - .field("addr", &self.addr) - .field("state", &state) - .finish() - } -} diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 35db326c94929..34aa9d9d4e7f0 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-tracing" -version = "2.0.0" +version = "3.0.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,15 +13,26 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +ansi_term = "0.12.1" +atty = "0.2.13" erased-serde = "0.3.9" +lazy_static = "1.4.0" log = { version = "0.4.8" } -parking_lot = "0.10.0" +once_cell = "1.4.1" +parking_lot = "0.11.1" +regex = "1.4.2" rustc-hash = "1.1.0" serde = "1.0.101" serde_json = "1.0.41" -slog = { version = "2.5.2", features = ["nested-values"] } -tracing = "0.1.21" +thiserror = "1.0.21" +tracing = "0.1.22" tracing-core = "0.1.17" -tracing-subscriber = "0.2.13" -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } +tracing-log = "0.1.1" +tracing-subscriber = "0.2.15" +sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } +sc-telemetry = { version = "3.0.0", 
path = "../telemetry" } +sc-tracing-proc-macro = { version = "3.0.0", path = "./proc-macro" } + +[target.'cfg(target_os = "unknown")'.dependencies] +wasm-bindgen = "0.2.67" +web-sys = { version = "0.3.44", features = ["console"] } diff --git a/client/cli/proc-macro/Cargo.toml b/client/tracing/proc-macro/Cargo.toml similarity index 76% rename from client/cli/proc-macro/Cargo.toml rename to client/tracing/proc-macro/Cargo.toml index 9b9d134c5a836..ac06dc45a9c40 100644 --- a/client/cli/proc-macro/Cargo.toml +++ b/client/tracing/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "sc-cli-proc-macro" -version = "2.0.0" +name = "sc-tracing-proc-macro" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -18,4 +18,4 @@ proc-macro = true proc-macro-crate = "0.1.4" proc-macro2 = "1.0.6" quote = { version = "1.0.3", features = ["proc-macro"] } -syn = { version = "1.0.7", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "1.0.58", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/client/cli/proc-macro/src/lib.rs b/client/tracing/proc-macro/src/lib.rs similarity index 95% rename from client/cli/proc-macro/src/lib.rs rename to client/tracing/proc-macro/src/lib.rs index 775d1eb96ea38..6164977f07c1e 100644 --- a/client/cli/proc-macro/src/lib.rs +++ b/client/tracing/proc-macro/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -120,16 +120,16 @@ pub fn prefix_logs_with(arg: TokenStream, item: TokenStream) -> TokenStream { let crate_name = if std::env::var("CARGO_PKG_NAME") .expect("cargo env var always there when compiling; qed") - == "sc-cli" + == "sc-tracing" { - Ident::new("sc_cli", Span::call_site().into()) + Ident::from(Ident::new("sc_tracing", Span::call_site())) } else { - let crate_name = match crate_name("sc-cli") { + let crate_name = match crate_name("sc-tracing") { Ok(x) => x, Err(err) => return Error::new(Span::call_site(), err).to_compile_error().into(), }; - Ident::new(&crate_name, Span::call_site().into()) + Ident::new(&crate_name, Span::call_site()) }; let ItemFn { @@ -143,7 +143,7 @@ pub fn prefix_logs_with(arg: TokenStream, item: TokenStream) -> TokenStream { #(#attrs)* #vis #sig { let span = #crate_name::tracing::info_span!( - #crate_name::PREFIX_LOG_SPAN, + #crate_name::logging::PREFIX_LOG_SPAN, name = #name, ); let _enter = span.enter(); diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index 6690f283464ea..2b0044a6f25b0 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
-// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Instrumentation implementation for substrate. //! @@ -24,10 +26,13 @@ //! //! Currently we provide `Log` (default), `Telemetry` variants for `Receiver` +#![warn(missing_docs)] + +pub mod logging; + use rustc_hash::FxHashMap; use std::fmt; use std::time::{Duration, Instant}; - use parking_lot::Mutex; use serde::ser::{Serialize, Serializer, SerializeMap}; use tracing::{ @@ -37,10 +42,16 @@ use tracing::{ span::{Attributes, Id, Record}, subscriber::Subscriber, }; -use tracing_subscriber::{CurrentSpan, layer::{Layer, Context}}; - +use tracing_subscriber::{ + CurrentSpan, + layer::{Layer, Context}, +}; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_tracing::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; + +#[doc(hidden)] +pub use tracing; + const ZERO_DURATION: Duration = Duration::from_nanos(0); /// Responsible for assigning ids to new spans, which are not re-used. @@ -77,10 +88,15 @@ pub trait TraceHandler: Send + Sync { /// Represents a tracing event, complete with values #[derive(Debug)] pub struct TraceEvent { + /// Name of the event. pub name: &'static str, + /// Target of the event. pub target: String, + /// Level of the event. pub level: Level, + /// Values for this event. pub values: Values, + /// Id of the parent tracing event, if any. pub parent_id: Option, } @@ -190,27 +206,6 @@ impl fmt::Display for Values { } } -impl slog::SerdeValue for Values { - fn as_serde(&self) -> &dyn erased_serde::Serialize { - self - } - - fn to_sendable(&self) -> Box { - Box::new(self.clone()) - } -} - -impl slog::Value for Values { - fn serialize( - &self, - _record: &slog::Record, - key: slog::Key, - ser: &mut dyn slog::Serializer, - ) -> slog::Result { - ser.emit_serde(key, self) - } -} - impl ProfilingLayer { /// Takes a `TracingReceiver` and a comma separated list of targets, /// either with a level: "pallet=trace,frame=debug" @@ -231,15 +226,13 @@ impl ProfilingLayer { /// either with a level, eg: "pallet=trace" /// or without: "pallet" in which case the level defaults to `trace`. 
/// wasm_tracing indicates whether to enable wasm traces - pub fn new_with_handler(trace_handler: Box, targets: &str) - -> Self - { + pub fn new_with_handler(trace_handler: Box, targets: &str) -> Self { let targets: Vec<_> = targets.split(',').map(|s| parse_target(s)).collect(); Self { targets, trace_handler, span_data: Mutex::new(FxHashMap::default()), - current_span: Default::default() + current_span: Default::default(), } } @@ -411,7 +404,7 @@ impl TraceHandler for TelemetryTraceHandler { "target" => span_datum.target, "time" => span_datum.overall_time.as_nanos(), "id" => span_datum.id.into_u64(), - "parent_id" => span_datum.parent_id.map(|i| i.into_u64()), + "parent_id" => span_datum.parent_id.as_ref().map(|i| i.into_u64()), "values" => span_datum.values ); } @@ -420,7 +413,7 @@ impl TraceHandler for TelemetryTraceHandler { telemetry!(SUBSTRATE_INFO; "tracing.event"; "name" => event.name, "target" => event.target, - "parent_id" => event.parent_id.map(|i| i.into_u64()), + "parent_id" => event.parent_id.as_ref().map(|i| i.into_u64()), "values" => event.values ); } @@ -447,12 +440,11 @@ mod tests { } } - type TestSubscriber = tracing_subscriber::layer::Layered< - ProfilingLayer, - tracing_subscriber::fmt::Subscriber - >; - - fn setup_subscriber() -> (TestSubscriber, Arc>>, Arc>>) { + fn setup_subscriber() -> ( + impl tracing::Subscriber + Send + Sync, + Arc>>, + Arc>> + ) { let spans = Arc::new(Mutex::new(Vec::new())); let events = Arc::new(Mutex::new(Vec::new())); let handler = TestTraceHandler { @@ -461,9 +453,9 @@ mod tests { }; let layer = ProfilingLayer::new_with_handler( Box::new(handler), - "test_target" + "test_target", ); - let subscriber = tracing_subscriber::fmt().finish().with(layer); + let subscriber = tracing_subscriber::fmt().with_writer(std::io::sink).finish().with(layer); (subscriber, spans, events) } @@ -567,64 +559,76 @@ mod tests { #[test] fn test_parent_id_with_threads() { - use std::sync::mpsc; - use std::thread; - - let (sub, spans, events) = setup_subscriber(); - let _sub_guard = tracing::subscriber::set_global_default(sub); - let span1 = tracing::info_span!(target: "test_target", "test_span1"); - let _guard1 = span1.enter(); - - let (tx, rx) = mpsc::channel(); - let handle = thread::spawn(move || { - let span2 = tracing::info_span!(target: "test_target", "test_span2"); - let _guard2 = span2.enter(); - // emit event - tracing::event!(target: "test_target", tracing::Level::INFO, "test_event1"); - for msg in rx.recv() { - if msg == false { - break; + use std::{sync::mpsc, thread}; + + if std::env::var("RUN_TEST_PARENT_ID_WITH_THREADS").is_err() { + let executable = std::env::current_exe().unwrap(); + let mut command = std::process::Command::new(executable); + + let res = command + .env("RUN_TEST_PARENT_ID_WITH_THREADS", "1") + .args(&["--nocapture", "test_parent_id_with_threads"]) + .output() + .unwrap() + .status; + assert!(res.success()); + } else { + let (sub, spans, events) = setup_subscriber(); + let _sub_guard = tracing::subscriber::set_global_default(sub); + let span1 = tracing::info_span!(target: "test_target", "test_span1"); + let _guard1 = span1.enter(); + + let (tx, rx) = mpsc::channel(); + let handle = thread::spawn(move || { + let span2 = tracing::info_span!(target: "test_target", "test_span2"); + let _guard2 = span2.enter(); + // emit event + tracing::event!(target: "test_target", tracing::Level::INFO, "test_event1"); + for msg in rx.recv() { + if msg == false { + break; + } } - } - // gard2 and span2 dropped / exited - }); + // gard2 and span2 dropped 
/ exited + }); - // wait for Event to be dispatched and stored - while events.lock().is_empty() { - thread::sleep(Duration::from_millis(1)); - } + // wait for Event to be dispatched and stored + while events.lock().is_empty() { + thread::sleep(Duration::from_millis(1)); + } - // emit new event (will be second item in Vec) while span2 still active in other thread - tracing::event!(target: "test_target", tracing::Level::INFO, "test_event2"); + // emit new event (will be second item in Vec) while span2 still active in other thread + tracing::event!(target: "test_target", tracing::Level::INFO, "test_event2"); - // stop thread and drop span - let _ = tx.send(false); - let _ = handle.join(); + // stop thread and drop span + let _ = tx.send(false); + let _ = handle.join(); - // wait for Span to be dispatched and stored - while spans.lock().is_empty() { - thread::sleep(Duration::from_millis(1)); + // wait for Span to be dispatched and stored + while spans.lock().is_empty() { + thread::sleep(Duration::from_millis(1)); + } + let span2 = spans.lock().remove(0); + let event1 = events.lock().remove(0); + drop(_guard1); + drop(span1); + + // emit event with no parent + tracing::event!(target: "test_target", tracing::Level::INFO, "test_event3"); + + let span1 = spans.lock().remove(0); + let event2 = events.lock().remove(0); + + assert_eq!(event1.values.string_values.get("message").unwrap(), "test_event1"); + assert_eq!(event2.values.string_values.get("message").unwrap(), "test_event2"); + assert!(span1.parent_id.is_none()); + assert!(span2.parent_id.is_none()); + assert_eq!(span2.id, event1.parent_id.unwrap()); + assert_eq!(span1.id, event2.parent_id.unwrap()); + assert_ne!(span2.id, span1.id); + + let event3 = events.lock().remove(0); + assert!(event3.parent_id.is_none()); } - let span2 = spans.lock().remove(0); - let event1 = events.lock().remove(0); - drop(_guard1); - drop(span1); - - // emit event with no parent - tracing::event!(target: "test_target", tracing::Level::INFO, "test_event3"); - - let span1 = spans.lock().remove(0); - let event2 = events.lock().remove(0); - - assert_eq!(event1.values.string_values.get("message").unwrap(), "test_event1"); - assert_eq!(event2.values.string_values.get("message").unwrap(), "test_event2"); - assert!(span1.parent_id.is_none()); - assert!(span2.parent_id.is_none()); - assert_eq!(span2.id, event1.parent_id.unwrap()); - assert_eq!(span1.id, event2.parent_id.unwrap()); - assert_ne!(span2.id, span1.id); - - let event3 = events.lock().remove(0); - assert!(event3.parent_id.is_none()); } } diff --git a/client/tracing/src/logging/directives.rs b/client/tracing/src/logging/directives.rs new file mode 100644 index 0000000000000..39dee2b061f0a --- /dev/null +++ b/client/tracing/src/logging/directives.rs @@ -0,0 +1,123 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
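A minimal sketch (not part of the patch itself) of how the reformatted `ProfilingLayer::new_with_handler` is reached in practice: `ProfilingLayer::new` takes a `TracingReceiver` plus the comma-separated targets string documented above ("pallet=trace,frame=debug", or a bare "pallet" which defaults to TRACE), and the resulting layer is stacked onto a base subscriber the same way the rewritten `setup_subscriber()` test helper does. The `sc_tracing::` paths are assumed from the crate's public exports.

    use tracing_subscriber::layer::SubscriberExt as _;

    // Profile `pallet` spans at TRACE and `frame` at DEBUG, reporting via the log output.
    let profiling = sc_tracing::ProfilingLayer::new(
        sc_tracing::TracingReceiver::Log,
        "pallet=trace,frame=debug",
    );

    // Stack the layer onto any base subscriber and install it globally.
    let subscriber = tracing_subscriber::fmt()
        .with_writer(std::io::sink)
        .finish()
        .with(profiling);
    tracing::subscriber::set_global_default(subscriber).expect("no global subscriber set yet");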
+ +use once_cell::sync::OnceCell; +use parking_lot::Mutex; +use tracing_subscriber::{ + filter::Directive, fmt as tracing_fmt, fmt::time::ChronoLocal, layer, reload::Handle, + EnvFilter, Registry, +}; + +// Handle to reload the tracing log filter +static FILTER_RELOAD_HANDLE: OnceCell> = OnceCell::new(); +// Directives that are defaulted to when resetting the log filter +static DEFAULT_DIRECTIVES: OnceCell>> = OnceCell::new(); +// Current state of log filter +static CURRENT_DIRECTIVES: OnceCell>> = OnceCell::new(); + +/// Add log filter directive(s) to the defaults +/// +/// The syntax is identical to the CLI `=`: +/// +/// `sync=debug,state=trace` +pub(crate) fn add_default_directives(directives: &str) { + DEFAULT_DIRECTIVES + .get_or_init(|| Mutex::new(Vec::new())) + .lock() + .push(directives.to_owned()); + add_directives(directives); +} + +/// Add directives to current directives +pub fn add_directives(directives: &str) { + CURRENT_DIRECTIVES + .get_or_init(|| Mutex::new(Vec::new())) + .lock() + .push(directives.to_owned()); +} + +/// Parse `Directive` and add to default directives if successful. +/// +/// Ensures the supplied directive will be restored when resetting the log filter. +pub(crate) fn parse_default_directive(directive: &str) -> super::Result { + let dir = directive.parse()?; + add_default_directives(directive); + Ok(dir) +} + +/// Reload the logging filter with the supplied directives added to the existing directives +pub fn reload_filter() -> Result<(), String> { + let mut env_filter = EnvFilter::default(); + if let Some(current_directives) = CURRENT_DIRECTIVES.get() { + // Use join and then split in case any directives added together + for directive in current_directives + .lock() + .join(",") + .split(',') + .map(|d| d.parse()) + { + match directive { + Ok(dir) => env_filter = env_filter.add_directive(dir), + Err(invalid_directive) => { + log::warn!( + target: "tracing", + "Unable to parse directive while setting log filter: {:?}", + invalid_directive, + ); + } + } + } + } + env_filter = env_filter.add_directive( + "sc_tracing=trace" + .parse() + .expect("provided directive is valid"), + ); + log::debug!(target: "tracing", "Reloading log filter with: {}", env_filter); + FILTER_RELOAD_HANDLE + .get() + .ok_or("No reload handle present".to_string())? + .reload(env_filter) + .map_err(|e| format!("{}", e)) +} + +/// Resets the log filter back to the original state when the node was started. +/// +/// Includes substrate defaults and CLI supplied directives. +pub fn reset_log_filter() -> Result<(), String> { + let directive = DEFAULT_DIRECTIVES + .get_or_init(|| Mutex::new(Vec::new())) + .lock() + .clone(); + + *CURRENT_DIRECTIVES + .get_or_init(|| Mutex::new(Vec::new())) + .lock() = directive; + reload_filter() +} + +/// Initialize FILTER_RELOAD_HANDLE, only possible once +pub(crate) fn set_reload_handle(handle: Handle) { + let _ = FILTER_RELOAD_HANDLE.set(handle); +} + +// The layered Subscriber as built up in `LoggerBuilder::init()`. +// Used in the reload `Handle`. 
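An illustrative sketch (not guaranteed by the patch, but following the `pub use directives::*` re-export added in `logging/mod.rs` below) of how these helpers compose at runtime: `add_directives` appends to the current directive set, `reload_filter` rebuilds the `EnvFilter` and swaps it in through the reload handle, and `reset_log_filter` restores the defaults recorded via `add_default_directives`/`parse_default_directive`. All of this assumes `LoggerBuilder::init()` has already installed the reload handle.

    // Temporarily crank up sync/consensus logging on a running node...
    sc_tracing::logging::add_directives("sync=trace,afg=debug");
    sc_tracing::logging::reload_filter()
        .expect("reload handle was installed by LoggerBuilder::init");

    // ...and later fall back to the directives the node was started with.
    sc_tracing::logging::reset_log_filter()
        .expect("reload handle was installed by LoggerBuilder::init");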
+type SCSubscriber< + N = tracing_fmt::format::DefaultFields, + E = crate::logging::EventFormat, + W = fn() -> std::io::Stderr, +> = layer::Layered, Registry>; diff --git a/client/cli/src/logging.rs b/client/tracing/src/logging/event_format.rs similarity index 55% rename from client/cli/src/logging.rs rename to client/tracing/src/logging/event_format.rs index e1fc90505b45f..37f9ed16ead7d 100644 --- a/client/cli/src/logging.rs +++ b/client/tracing/src/logging/event_format.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,51 +17,62 @@ // along with this program. If not, see . use ansi_term::Colour; -use std::fmt; -use tracing::{span::Attributes, Event, Id, Level, Subscriber}; +use regex::Regex; +use std::fmt::{self, Write}; +use tracing::{Event, Level, Subscriber}; use tracing_log::NormalizeEvent; use tracing_subscriber::{ + field::RecordFields, fmt::{ time::{FormatTime, SystemTime}, FmtContext, FormatEvent, FormatFields, }, layer::Context, - registry::LookupSpan, - Layer, + registry::{LookupSpan, SpanRef}, }; -/// Span name used for the logging prefix. See macro `sc_cli::prefix_logs_with!` -pub const PREFIX_LOG_SPAN: &str = "substrate-log-prefix"; - -pub(crate) struct EventFormat { - pub(crate) timer: T, - pub(crate) ansi: bool, - pub(crate) display_target: bool, - pub(crate) display_level: bool, - pub(crate) display_thread_name: bool, +/// A pre-configured event formatter. +pub struct EventFormat { + /// Use the given timer for log message timestamps. + pub timer: T, + /// Sets whether or not an event's target is displayed. + pub display_target: bool, + /// Sets whether or not an event's level is displayed. + pub display_level: bool, + /// Sets whether or not the name of the current thread is displayed when formatting events. + pub display_thread_name: bool, + /// Enable ANSI terminal colors for formatted output. 
+ pub enable_color: bool, } -// NOTE: the following code took inspiration from tracing-subscriber -// -// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/format/mod.rs#L449 -impl FormatEvent for EventFormat +impl EventFormat where - S: Subscriber + for<'a> LookupSpan<'a>, - N: for<'a> FormatFields<'a> + 'static, T: FormatTime, { - fn format_event( + // NOTE: the following code took inspiration from tracing-subscriber + // + // https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/format/mod.rs#L449 + pub(crate) fn format_event_custom<'b, S, N>( &self, - ctx: &FmtContext, + ctx: CustomFmtContext<'b, S, N>, writer: &mut dyn fmt::Write, event: &Event, - ) -> fmt::Result { + ) -> fmt::Result + where + S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'a> FormatFields<'a> + 'static, + { + if event.metadata().target() == sc_telemetry::TELEMETRY_LOG_SPAN { + return Ok(()); + } + + let writer = &mut MaybeColorWriter::new(self.enable_color, writer); let normalized_meta = event.normalized_metadata(); let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata()); - time::write(&self.timer, writer, self.ansi)?; + time::write(&self.timer, writer, self.enable_color)?; if self.display_level { - let fmt_level = { FmtLevel::new(meta.level(), self.ansi) }; + let fmt_level = { FmtLevel::new(meta.level(), self.enable_color) }; write!(writer, "{} ", fmt_level)?; } @@ -78,82 +89,45 @@ where } } + if self.display_target { + write!(writer, "{}: ", meta.target())?; + } + // Custom code to display node name if let Some(span) = ctx.lookup_current() { let parents = span.parents(); for span in std::iter::once(span).chain(parents) { let exts = span.extensions(); - if let Some(node_name) = exts.get::() { - write!(writer, "{}", node_name.as_str())?; + if let Some(prefix) = exts.get::() { + write!(writer, "{}", prefix.as_str())?; break; } } } - if self.display_target { - write!(writer, "{}:", meta.target())?; - } ctx.format_fields(writer, event)?; - writeln!(writer) + writeln!(writer)?; + + writer.write() } } -pub(crate) struct NodeNameLayer; - -impl Layer for NodeNameLayer +// NOTE: the following code took inspiration from tracing-subscriber +// +// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/format/mod.rs#L449 +impl FormatEvent for EventFormat where S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'a> FormatFields<'a> + 'static, + T: FormatTime, { - fn new_span(&self, attrs: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) { - let span = ctx - .span(id) - .expect("new_span has been called for this span; qed"); - - if span.name() != PREFIX_LOG_SPAN { - return; - } - - let mut extensions = span.extensions_mut(); - - if extensions.get_mut::().is_none() { - let mut s = String::new(); - let mut v = NodeNameVisitor(&mut s); - attrs.record(&mut v); - - if !s.is_empty() { - let fmt_fields = NodeName(s); - extensions.insert(fmt_fields); - } - } - } -} - -struct NodeNameVisitor<'a, W: std::fmt::Write>(&'a mut W); - -macro_rules! 
write_node_name { - ($method:ident, $type:ty, $format:expr) => { - fn $method(&mut self, field: &tracing::field::Field, value: $type) { - if field.name() == "name" { - write!(self.0, $format, value).expect("no way to return the err; qed"); - } - } - }; -} - -impl<'a, W: std::fmt::Write> tracing::field::Visit for NodeNameVisitor<'a, W> { - write_node_name!(record_debug, &dyn std::fmt::Debug, "[{:?}] "); - write_node_name!(record_str, &str, "[{}] "); - write_node_name!(record_i64, i64, "[{}] "); - write_node_name!(record_u64, u64, "[{}] "); - write_node_name!(record_bool, bool, "[{}] "); -} - -#[derive(Debug)] -struct NodeName(String); - -impl NodeName { - fn as_str(&self) -> &str { - self.0.as_str() + fn format_event( + &self, + ctx: &FmtContext, + writer: &mut dyn fmt::Write, + event: &Event, + ) -> fmt::Result { + self.format_event_custom(CustomFmtContext::FmtContext(ctx), writer, event) } } @@ -266,3 +240,95 @@ mod time { Ok(()) } } + +// NOTE: `FmtContext`'s fields are private. This enum allows us to make a `format_event` function +// that works with `FmtContext` or `Context` with `FormatFields` +#[allow(dead_code)] +pub(crate) enum CustomFmtContext<'a, S, N> { + FmtContext(&'a FmtContext<'a, S, N>), + ContextWithFormatFields(&'a Context<'a, S>, &'a N), +} + +impl<'a, S, N> FormatFields<'a> for CustomFmtContext<'a, S, N> +where + S: Subscriber + for<'lookup> LookupSpan<'lookup>, + N: for<'writer> FormatFields<'writer> + 'static, +{ + fn format_fields( + &self, + writer: &'a mut dyn fmt::Write, + fields: R, + ) -> fmt::Result { + match self { + CustomFmtContext::FmtContext(fmt_ctx) => fmt_ctx.format_fields(writer, fields), + CustomFmtContext::ContextWithFormatFields(_ctx, fmt_fields) => { + fmt_fields.format_fields(writer, fields) + } + } + } +} + +// NOTE: the following code has been duplicated from tracing-subscriber +// +// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/fmt_layer.rs#L788 +impl<'a, S, N> CustomFmtContext<'a, S, N> +where + S: Subscriber + for<'lookup> LookupSpan<'lookup>, + N: for<'writer> FormatFields<'writer> + 'static, +{ + #[inline] + pub fn lookup_current(&self) -> Option> + where + S: for<'lookup> LookupSpan<'lookup>, + { + match self { + CustomFmtContext::FmtContext(fmt_ctx) => fmt_ctx.lookup_current(), + CustomFmtContext::ContextWithFormatFields(ctx, _) => ctx.lookup_current(), + } + } +} + +/// A writer that may write to `inner_writer` with colors. +/// +/// This is used by [`EventFormat`] to kill colors when `enable_color` is `false`. +/// +/// It is required to call [`MaybeColorWriter::write`] after all writes are done, +/// because the content of these writes is buffered and will only be written to the +/// `inner_writer` at that point. +struct MaybeColorWriter<'a> { + enable_color: bool, + buffer: String, + inner_writer: &'a mut dyn fmt::Write, +} + +impl<'a> fmt::Write for MaybeColorWriter<'a> { + fn write_str(&mut self, buf: &str) -> fmt::Result { + self.buffer.push_str(buf); + Ok(()) + } +} + +impl<'a> MaybeColorWriter<'a> { + /// Creates a new instance. + fn new(enable_color: bool, inner_writer: &'a mut dyn fmt::Write) -> Self { + Self { + enable_color, + inner_writer, + buffer: String::new(), + } + } + + /// Write the buffered content to the `inner_writer`. + fn write(&mut self) -> fmt::Result { + lazy_static::lazy_static! 
{ + static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); + } + + if !self.enable_color { + let replaced = RE.replace_all(&self.buffer, ""); + self.inner_writer.write_str(&replaced) + } else { + self.inner_writer.write_str(&self.buffer) + } + } +} diff --git a/client/tracing/src/logging/layers/console_log.rs b/client/tracing/src/logging/layers/console_log.rs new file mode 100644 index 0000000000000..be992ae814235 --- /dev/null +++ b/client/tracing/src/logging/layers/console_log.rs @@ -0,0 +1,120 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::logging::event_format::{CustomFmtContext, EventFormat}; +use std::fmt; +use tracing::{Event, Level, Subscriber}; +use tracing_subscriber::{ + fmt::{ + time::{FormatTime, SystemTime}, + FormatFields, + }, + layer::Context, + registry::LookupSpan, + Layer, +}; +use wasm_bindgen::prelude::*; + +/// A `Layer` that display logs in the browser's console. +pub struct ConsoleLogLayer { + event_format: EventFormat, + fmt_fields: N, + _inner: std::marker::PhantomData, +} + +impl ConsoleLogLayer { + /// Create a new [`ConsoleLogLayer`] using the `EventFormat` provided in argument. + pub fn new(event_format: EventFormat) -> Self { + Self { + event_format, + fmt_fields: Default::default(), + _inner: std::marker::PhantomData, + } + } +} + +// NOTE: the following code took inspiration from `EventFormat` (in this file) +impl ConsoleLogLayer +where + S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'writer> FormatFields<'writer> + 'static, +{ + fn format_event( + &self, + ctx: &Context<'_, S>, + writer: &mut dyn fmt::Write, + event: &Event, + ) -> fmt::Result { + self.event_format.format_event_custom( + CustomFmtContext::ContextWithFormatFields(ctx, &self.fmt_fields), + writer, + event, + ) + } +} + +// NOTE: the following code took inspiration from tracing-subscriber +// +// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/fmt_layer.rs#L717 +impl Layer for ConsoleLogLayer +where + S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'writer> FormatFields<'writer> + 'static, + T: FormatTime + 'static, +{ + fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) { + thread_local! 
{ + static BUF: std::cell::RefCell = std::cell::RefCell::new(String::new()); + } + + BUF.with(|buf| { + let borrow = buf.try_borrow_mut(); + let mut a; + let mut b; + let mut buf = match borrow { + Ok(buf) => { + a = buf; + &mut *a + } + _ => { + b = String::new(); + &mut b + } + }; + + if self.format_event(&ctx, &mut buf, event).is_ok() { + if !buf.is_empty() { + let meta = event.metadata(); + let level = meta.level(); + // NOTE: the following code took inspiration from tracing-subscriber + // + // https://github.com/iamcodemaker/console_log/blob/f13b5d6755/src/lib.rs#L149 + match *level { + Level::ERROR => web_sys::console::error_1(&JsValue::from(buf.as_str())), + Level::WARN => web_sys::console::warn_1(&JsValue::from(buf.as_str())), + Level::INFO => web_sys::console::info_1(&JsValue::from(buf.as_str())), + Level::DEBUG => web_sys::console::log_1(&JsValue::from(buf.as_str())), + Level::TRACE => web_sys::console::debug_1(&JsValue::from(buf.as_str())), + } + } + } + + buf.clear(); + }); + } +} diff --git a/client/tracing/src/logging/layers/mod.rs b/client/tracing/src/logging/layers/mod.rs new file mode 100644 index 0000000000000..8bda65f4c99bb --- /dev/null +++ b/client/tracing/src/logging/layers/mod.rs @@ -0,0 +1,25 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#[cfg(target_os = "unknown")] +mod console_log; +mod prefix_layer; + +#[cfg(target_os = "unknown")] +pub use console_log::*; +pub use prefix_layer::*; diff --git a/client/tracing/src/logging/layers/prefix_layer.rs b/client/tracing/src/logging/layers/prefix_layer.rs new file mode 100644 index 0000000000000..0c8f25c24100c --- /dev/null +++ b/client/tracing/src/logging/layers/prefix_layer.rs @@ -0,0 +1,95 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use tracing::{span::Attributes, Id, Subscriber}; +use tracing_subscriber::{layer::Context, registry::LookupSpan, Layer}; + +/// Span name used for the logging prefix. 
See macro `sc_tracing::logging::prefix_logs_with!` +pub const PREFIX_LOG_SPAN: &str = "substrate-log-prefix"; + +/// A `Layer` that captures the prefix span ([`PREFIX_LOG_SPAN`]) which is then used by +/// [`EventFormat`] to prefix the log lines by customizable string. +/// +/// See the macro `sc_cli::prefix_logs_with!` for more details. +pub struct PrefixLayer; + +impl Layer for PrefixLayer +where + S: Subscriber + for<'a> LookupSpan<'a>, +{ + fn new_span(&self, attrs: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) { + let span = match ctx.span(id) { + Some(span) => span, + None => { + // this shouldn't happen! + debug_assert!( + false, + "newly created span with ID {:?} did not exist in the registry; this is a bug!", + id + ); + return; + } + }; + + if span.name() != PREFIX_LOG_SPAN { + return; + } + + let mut extensions = span.extensions_mut(); + + if extensions.get_mut::().is_none() { + let mut s = String::new(); + let mut v = PrefixVisitor(&mut s); + attrs.record(&mut v); + + if !s.is_empty() { + let fmt_fields = Prefix(s); + extensions.insert(fmt_fields); + } + } + } +} + +struct PrefixVisitor<'a, W: std::fmt::Write>(&'a mut W); + +macro_rules! write_node_name { + ($method:ident, $type:ty, $format:expr) => { + fn $method(&mut self, field: &tracing::field::Field, value: $type) { + if field.name() == "name" { + let _ = write!(self.0, $format, value); + } + } + }; +} + +impl<'a, W: std::fmt::Write> tracing::field::Visit for PrefixVisitor<'a, W> { + write_node_name!(record_debug, &dyn std::fmt::Debug, "[{:?}] "); + write_node_name!(record_str, &str, "[{}] "); + write_node_name!(record_i64, i64, "[{}] "); + write_node_name!(record_u64, u64, "[{}] "); + write_node_name!(record_bool, bool, "[{}] "); +} + +#[derive(Debug)] +pub(crate) struct Prefix(String); + +impl Prefix { + pub(crate) fn as_str(&self) -> &str { + self.0.as_str() + } +} diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs new file mode 100644 index 0000000000000..5674b50cb98e2 --- /dev/null +++ b/client/tracing/src/logging/mod.rs @@ -0,0 +1,531 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate logging library. +//! +//! This crate uses tokio's [tracing](https://github.com/tokio-rs/tracing/) library for logging. 
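A small usage sketch (the function body and node name are made up for illustration) tying the pieces above together: the relocated `prefix_logs_with` attribute opens a span named `PREFIX_LOG_SPAN`, the new `PrefixLayer` captures that span's `name` field into a `Prefix` extension, and `EventFormat` prints it ahead of every event emitted inside the span, matching the shape checked by the `prefix_in_log_lines` test further down in this file.

    use sc_tracing::logging::prefix_logs_with;

    #[prefix_logs_with("alice")]
    fn run_node() {
        // Rendered roughly as: 2021-01-01 12:00:00 [alice] Starting node
        log::info!("Starting node");
    }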
+ +#![warn(missing_docs)] + +mod directives; +mod event_format; +mod layers; + +pub use directives::*; +pub use sc_tracing_proc_macro::*; + +use sc_telemetry::{ExtTransport, TelemetryWorker}; +use std::io; +use tracing::Subscriber; +use tracing_subscriber::{ + fmt::time::ChronoLocal, + fmt::{ + format, FormatEvent, FormatFields, Formatter, Layer as FmtLayer, MakeWriter, + SubscriberBuilder, + }, + layer::{self, SubscriberExt}, filter::LevelFilter, + registry::LookupSpan, + EnvFilter, FmtSubscriber, Layer, Registry, +}; + +pub use event_format::*; +pub use layers::*; + +/// Logging Result typedef. +pub type Result = std::result::Result; + +/// Logging errors. +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +#[non_exhaustive] +#[error(transparent)] +pub enum Error { + IoError(#[from] io::Error), + SetGlobalDefaultError(#[from] tracing::subscriber::SetGlobalDefaultError), + DirectiveParseError(#[from] tracing_subscriber::filter::ParseError), + SetLoggerError(#[from] tracing_log::log_tracer::SetLoggerError), +} + +macro_rules! enable_log_reloading { + ($builder:expr) => {{ + let builder = $builder.with_filter_reloading(); + let handle = builder.reload_handle(); + set_reload_handle(handle); + builder + }}; +} + +/// Common implementation to get the subscriber. +fn prepare_subscriber( + directives: &str, + profiling_targets: Option<&str>, + force_colors: Option, + telemetry_buffer_size: Option, + telemetry_external_transport: Option, + builder_hook: impl Fn( + SubscriberBuilder< + format::DefaultFields, + EventFormat, + EnvFilter, + fn() -> std::io::Stderr, + >, + ) -> SubscriberBuilder, +) -> Result<(impl Subscriber + for<'a> LookupSpan<'a>, TelemetryWorker)> +where + N: for<'writer> FormatFields<'writer> + 'static, + E: FormatEvent + 'static, + W: MakeWriter + 'static, + F: layer::Layer> + Send + Sync + 'static, + FmtLayer: layer::Layer + Send + Sync + 'static, +{ + // Accept all valid directives and print invalid ones + fn parse_user_directives(mut env_filter: EnvFilter, dirs: &str) -> Result { + for dir in dirs.split(',') { + env_filter = env_filter.add_directive(parse_default_directive(&dir)?); + } + Ok(env_filter) + } + + // Initialize filter - ensure to use `parse_default_directive` for any defaults to persist + // after log filter reloading by RPC + let mut env_filter = EnvFilter::default() + // Enable info + .add_directive(parse_default_directive("info").expect("provided directive is valid")) + // Disable info logging by default for some modules. + .add_directive(parse_default_directive("ws=off").expect("provided directive is valid")) + .add_directive(parse_default_directive("yamux=off").expect("provided directive is valid")) + .add_directive( + parse_default_directive("cranelift_codegen=off").expect("provided directive is valid"), + ) + // Set warn logging by default for some modules. 
+ .add_directive( + parse_default_directive("cranelift_wasm=warn").expect("provided directive is valid"), + ) + .add_directive(parse_default_directive("hyper=warn").expect("provided directive is valid")); + + if let Ok(lvl) = std::env::var("RUST_LOG") { + if lvl != "" { + env_filter = parse_user_directives(env_filter, &lvl)?; + } + } + + if directives != "" { + env_filter = parse_user_directives(env_filter, directives)?; + } + + if let Some(profiling_targets) = profiling_targets { + env_filter = parse_user_directives(env_filter, profiling_targets)?; + env_filter = env_filter + .add_directive( + parse_default_directive("sc_tracing=trace").expect("provided directive is valid") + ); + } + + let max_level_hint = Layer::::max_level_hint(&env_filter); + + let max_level = match max_level_hint { + Some(LevelFilter::INFO) | None => log::LevelFilter::Info, + Some(LevelFilter::TRACE) => log::LevelFilter::Trace, + Some(LevelFilter::WARN) => log::LevelFilter::Warn, + Some(LevelFilter::ERROR) => log::LevelFilter::Error, + Some(LevelFilter::DEBUG) => log::LevelFilter::Debug, + Some(LevelFilter::OFF) => log::LevelFilter::Off, + }; + + tracing_log::LogTracer::builder() + .with_max_level(max_level) + .init()?; + + // If we're only logging `INFO` entries then we'll use a simplified logging format. + let simple = match max_level_hint { + Some(level) if level <= tracing_subscriber::filter::LevelFilter::INFO => true, + _ => false, + }; + + let enable_color = force_colors.unwrap_or_else(|| atty::is(atty::Stream::Stderr)); + let timer = ChronoLocal::with_format(if simple { + "%Y-%m-%d %H:%M:%S".to_string() + } else { + "%Y-%m-%d %H:%M:%S%.3f".to_string() + }); + + let (telemetry_layer, telemetry_worker) = + sc_telemetry::TelemetryLayer::new(telemetry_buffer_size, telemetry_external_transport)?; + let event_format = EventFormat { + timer, + display_target: !simple, + display_level: !simple, + display_thread_name: !simple, + enable_color, + }; + let builder = FmtSubscriber::builder().with_env_filter(env_filter); + + #[cfg(not(target_os = "unknown"))] + let builder = builder.with_writer(std::io::stderr as _); + + #[cfg(target_os = "unknown")] + let builder = builder.with_writer(std::io::sink); + + #[cfg(not(target_os = "unknown"))] + let builder = builder.event_format(event_format); + + #[cfg(not(target_os = "unknown"))] + let builder = builder_hook(builder); + + let subscriber = builder.finish().with(PrefixLayer).with(telemetry_layer); + + #[cfg(target_os = "unknown")] + let subscriber = subscriber.with(ConsoleLogLayer::new(event_format)); + + Ok((subscriber, telemetry_worker)) +} + +/// A builder that is used to initialize the global logger. +pub struct LoggerBuilder { + directives: String, + profiling: Option<(crate::TracingReceiver, String)>, + telemetry_buffer_size: Option, + telemetry_external_transport: Option, + log_reloading: bool, + force_colors: Option, +} + +impl LoggerBuilder { + /// Create a new [`LoggerBuilder`] which can be used to initialize the global logger. + pub fn new>(directives: S) -> Self { + Self { + directives: directives.into(), + profiling: None, + telemetry_buffer_size: None, + telemetry_external_transport: None, + log_reloading: true, + force_colors: None, + } + } + + /// Set up the profiling. + pub fn with_profiling>( + &mut self, + tracing_receiver: crate::TracingReceiver, + profiling_targets: S, + ) -> &mut Self { + self.profiling = Some((tracing_receiver, profiling_targets.into())); + self + } + + /// Wether or not to disable log reloading. 
+ pub fn with_log_reloading(&mut self, enabled: bool) -> &mut Self { + self.log_reloading = enabled; + self + } + + /// Set a custom buffer size for the telemetry. + pub fn with_telemetry_buffer_size(&mut self, buffer_size: usize) -> &mut Self { + self.telemetry_buffer_size = Some(buffer_size); + self + } + + /// Set a custom network transport (used for the telemetry). + pub fn with_transport(&mut self, transport: ExtTransport) -> &mut Self { + self.telemetry_external_transport = Some(transport); + self + } + + /// Force enable/disable colors. + pub fn with_colors(&mut self, enable: bool) -> &mut Self { + self.force_colors = Some(enable); + self + } + + /// Initialize the global logger + /// + /// This sets various global logging and tracing instances and thus may only be called once. + pub fn init(self) -> Result { + if let Some((tracing_receiver, profiling_targets)) = self.profiling { + if self.log_reloading { + let (subscriber, telemetry_worker) = prepare_subscriber( + &self.directives, + Some(&profiling_targets), + self.force_colors, + self.telemetry_buffer_size, + self.telemetry_external_transport, + |builder| enable_log_reloading!(builder), + )?; + let profiling = crate::ProfilingLayer::new(tracing_receiver, &profiling_targets); + + tracing::subscriber::set_global_default(subscriber.with(profiling))?; + + Ok(telemetry_worker) + } else { + let (subscriber, telemetry_worker) = prepare_subscriber( + &self.directives, + Some(&profiling_targets), + self.force_colors, + self.telemetry_buffer_size, + self.telemetry_external_transport, + |builder| builder, + )?; + let profiling = crate::ProfilingLayer::new(tracing_receiver, &profiling_targets); + + tracing::subscriber::set_global_default(subscriber.with(profiling))?; + + Ok(telemetry_worker) + } + } else { + if self.log_reloading { + let (subscriber, telemetry_worker) = prepare_subscriber( + &self.directives, + None, + self.force_colors, + self.telemetry_buffer_size, + self.telemetry_external_transport, + |builder| enable_log_reloading!(builder), + )?; + + tracing::subscriber::set_global_default(subscriber)?; + + Ok(telemetry_worker) + } else { + let (subscriber, telemetry_worker) = prepare_subscriber( + &self.directives, + None, + self.force_colors, + self.telemetry_buffer_size, + self.telemetry_external_transport, + |builder| builder, + )?; + + tracing::subscriber::set_global_default(subscriber)?; + + Ok(telemetry_worker) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate as sc_tracing; + use std::{env, process::Command}; + use tracing::{metadata::Kind, subscriber::Interest, Callsite, Level, Metadata}; + + const EXPECTED_LOG_MESSAGE: &'static str = "yeah logging works as expected"; + const EXPECTED_NODE_NAME: &'static str = "THE_NODE"; + + fn init_logger(directives: &str) { + let _ = LoggerBuilder::new(directives).init().unwrap(); + } + + #[test] + fn test_logger_filters() { + if env::var("RUN_TEST_LOGGER_FILTERS").is_ok() { + let test_directives = "afg=debug,sync=trace,client=warn,telemetry,something-with-dash=error"; + init_logger(&test_directives); + + tracing::dispatcher::get_default(|dispatcher| { + let test_filter = |target, level| { + struct DummyCallSite; + impl Callsite for DummyCallSite { + fn set_interest(&self, _: Interest) {} + fn metadata(&self) -> &Metadata<'_> { + unreachable!(); + } + } + + let metadata = tracing::metadata!( + name: "", + target: target, + level: level, + fields: &[], + callsite: &DummyCallSite, + kind: Kind::SPAN, + ); + + dispatcher.enabled(&metadata) + }; + + 
assert!(test_filter("afg", Level::INFO)); + assert!(test_filter("afg", Level::DEBUG)); + assert!(!test_filter("afg", Level::TRACE)); + + assert!(test_filter("sync", Level::TRACE)); + assert!(test_filter("client", Level::WARN)); + + assert!(test_filter("telemetry", Level::TRACE)); + assert!(test_filter("something-with-dash", Level::ERROR)); + }); + } else { + let status = Command::new(env::current_exe().unwrap()) + .arg("test_logger_filters") + .env("RUN_TEST_LOGGER_FILTERS", "1") + .output() + .unwrap() + .status; + assert!(status.success()); + } + } + + /// This test ensures that using dash (`-`) in the target name in logs and directives actually + /// work. + #[test] + fn dash_in_target_name_works() { + let executable = env::current_exe().unwrap(); + let output = Command::new(executable) + .env("ENABLE_LOGGING", "1") + .args(&["--nocapture", "log_something_with_dash_target_name"]) + .output() + .unwrap(); + + let output = String::from_utf8(output.stderr).unwrap(); + assert!(output.contains(EXPECTED_LOG_MESSAGE)); + } + + /// This is not an actual test, it is used by the `dash_in_target_name_works` test. + /// The given test will call the test executable and only execute this one test that + /// only prints `EXPECTED_LOG_MESSAGE` through logging while using a target + /// name that contains a dash. This ensures that target names with dashes work. + #[test] + fn log_something_with_dash_target_name() { + if env::var("ENABLE_LOGGING").is_ok() { + let test_directives = "test-target=info"; + let _guard = init_logger(&test_directives); + + log::info!(target: "test-target", "{}", EXPECTED_LOG_MESSAGE); + } + } + + #[test] + fn prefix_in_log_lines() { + let re = regex::Regex::new(&format!( + r"^\d{{4}}-\d{{2}}-\d{{2}} \d{{2}}:\d{{2}}:\d{{2}} \[{}\] {}$", + EXPECTED_NODE_NAME, EXPECTED_LOG_MESSAGE, + )) + .unwrap(); + let executable = env::current_exe().unwrap(); + let output = Command::new(executable) + .env("ENABLE_LOGGING", "1") + .args(&["--nocapture", "prefix_in_log_lines_entrypoint"]) + .output() + .unwrap(); + + let output = String::from_utf8(output.stderr).unwrap(); + assert!( + re.is_match(output.trim()), + format!("Expected:\n{}\nGot:\n{}", re, output), + ); + } + + /// This is not an actual test, it is used by the `prefix_in_log_lines` test. + /// The given test will call the test executable and only execute this one test that + /// only prints a log line prefixed by the node name `EXPECTED_NODE_NAME`. + #[test] + fn prefix_in_log_lines_entrypoint() { + if env::var("ENABLE_LOGGING").is_ok() { + let _guard = init_logger(""); + prefix_in_log_lines_process(); + } + } + + #[crate::logging::prefix_logs_with(EXPECTED_NODE_NAME)] + fn prefix_in_log_lines_process() { + log::info!("{}", EXPECTED_LOG_MESSAGE); + } + + /// This is not an actual test, it is used by the `do_not_write_with_colors_on_tty` test. + /// The given test will call the test executable and only execute this one test that + /// only prints a log line with some colors in it. 
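A hedged end-to-end sketch of the `LoggerBuilder` introduced above, mirroring the `LoggerBuilder::new(directives).init()` helper used by these tests; only methods added in this patch are called, and the directive string is illustrative.

    fn main() {
        let mut builder = sc_tracing::logging::LoggerBuilder::new("sync=debug,afg=trace");
        builder.with_colors(false);        // e.g. when stderr is not a terminal
        builder.with_log_reloading(true);  // keep the runtime `reload_filter` path available
        let _telemetry_worker = builder
            .init()
            .expect("the global logger can only be initialised once per process");
        log::info!("logger initialised");
    }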
+ #[test] + fn do_not_write_with_colors_on_tty_entrypoint() { + if env::var("ENABLE_LOGGING").is_ok() { + let _guard = init_logger(""); + log::info!("{}", ansi_term::Colour::Yellow.paint(EXPECTED_LOG_MESSAGE)); + } + } + + #[test] + fn do_not_write_with_colors_on_tty() { + let re = regex::Regex::new(&format!( + r"^\d{{4}}-\d{{2}}-\d{{2}} \d{{2}}:\d{{2}}:\d{{2}} {}$", + EXPECTED_LOG_MESSAGE, + )) + .unwrap(); + let executable = env::current_exe().unwrap(); + let output = Command::new(executable) + .env("ENABLE_LOGGING", "1") + .args(&["--nocapture", "do_not_write_with_colors_on_tty_entrypoint"]) + .output() + .unwrap(); + + let output = String::from_utf8(output.stderr).unwrap(); + assert!( + re.is_match(output.trim()), + format!("Expected:\n{}\nGot:\n{}", re, output), + ); + } + + #[test] + fn log_max_level_is_set_properly() { + fn run_test(rust_log: Option, tracing_targets: Option) -> String { + let executable = env::current_exe().unwrap(); + let mut command = Command::new(executable); + + command + .env("PRINT_MAX_LOG_LEVEL", "1") + .args(&["--nocapture", "log_max_level_is_set_properly"]); + + if let Some(rust_log) = rust_log { + command.env("RUST_LOG", rust_log); + } + + if let Some(tracing_targets) = tracing_targets { + command.env("TRACING_TARGETS", tracing_targets); + } + + let output = command.output().unwrap(); + + dbg!(String::from_utf8(output.stderr)).unwrap() + } + + if env::var("PRINT_MAX_LOG_LEVEL").is_ok() { + let mut builder = LoggerBuilder::new(""); + + if let Ok(targets) = env::var("TRACING_TARGETS") { + builder.with_profiling(crate::TracingReceiver::Log, targets); + } + + builder.init().unwrap(); + + eprint!("MAX_LOG_LEVEL={:?}", log::max_level()); + } else { + assert_eq!("MAX_LOG_LEVEL=Info", run_test(None, None)); + assert_eq!( + "MAX_LOG_LEVEL=Trace", + run_test(Some("test=trace".into()), None) + ); + assert_eq!( + "MAX_LOG_LEVEL=Debug", + run_test(Some("test=debug".into()), None) + ); + assert_eq!( + "MAX_LOG_LEVEL=Trace", + run_test(None, Some("test=info".into())) + ); + } + } +} diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 5db37f5368387..d457d709d1222 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-transaction-pool" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,31 +13,31 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } -derive_more = "0.99.2" +codec = { package = "parity-scale-codec", version = "2.0.0" } +thiserror = "1.0.21" futures = { version = "0.3.1", features = ["compat"] } futures-diagnose = "1.0" intervalier = "0.4.0" log = "0.4.8" -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -parking_lot = "0.10.0" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} -sc-client-api = { version = "2.0.0", path = "../api" } -sc-transaction-graph = { version = "2.0.0", path = "./graph" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } 
-sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } +parking_lot = "0.11.1" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} +sc-client-api = { version = "3.0.0", path = "../api" } +sc-transaction-graph = { version = "3.0.0", path = "./graph" } +sp-api = { version = "3.0.0", path = "../../primitives/api" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } +sp-transaction-pool = { version = "3.0.0", path = "../../primitives/transaction-pool" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-utils = { version = "3.0.0", path = "../../primitives/utils" } wasm-timer = "0.2" [dev-dependencies] assert_matches = "1.3.0" hex = "0.4" -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } substrate-test-runtime-transaction-pool = { version = "2.0.0", path = "../../test-utils/runtime/transaction-pool" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8.0", path = "../block-builder" } +sc-block-builder = { version = "0.9.0", path = "../block-builder" } diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index c5850e765fcfa..7ed455f9370c5 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-transaction-graph" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,23 +14,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -futures = "0.3.4" +thiserror = "1.0.21" +futures = "0.3.9" log = "0.4.8" -parking_lot = "0.10.0" +parking_lot = "0.11.1" serde = { version = "1.0.101", features = ["derive"] } wasm-timer = "0.2" -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-utils = { version = "2.0.0", path = "../../../primitives/utils" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-utils = { version = "3.0.0", path = "../../../primitives/utils" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "3.0.0", path = "../../../primitives/transaction-pool" } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } linked-hash-map = "0.5.2" -retain_mut = "0.1.1" +retain_mut = "0.1.2" [dev-dependencies] assert_matches = "1.3.0" 
-codec = { package = "parity-scale-codec", version = "1.3.4" } +codec = { package = "parity-scale-codec", version = "2.0.0" } substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } criterion = "0.3" diff --git a/client/transaction-pool/graph/benches/basics.rs b/client/transaction-pool/graph/benches/basics.rs index bb10086bd4a55..21e3d1006d5df 100644 --- a/client/transaction-pool/graph/benches/basics.rs +++ b/client/transaction-pool/graph/benches/basics.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -164,13 +164,19 @@ fn benchmark_main(c: &mut Criterion) { c.bench_function("sequential 50 tx", |b| { b.iter(|| { - bench_configured(Pool::new(Default::default(), TestApi::new_dependant().into()), 50); + bench_configured( + Pool::new(Default::default(), true.into(), TestApi::new_dependant().into()), + 50, + ); }); }); c.bench_function("random 100 tx", |b| { b.iter(|| { - bench_configured(Pool::new(Default::default(), TestApi::default().into()), 100); + bench_configured( + Pool::new(Default::default(), true.into(), TestApi::default().into()), + 100, + ); }); }); } diff --git a/client/transaction-pool/graph/src/base_pool.rs b/client/transaction-pool/graph/src/base_pool.rs index 81d8e802c2c9e..445ef0adaf7b7 100644 --- a/client/transaction-pool/graph/src/base_pool.rs +++ b/client/transaction-pool/graph/src/base_pool.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/transaction-pool/graph/src/error.rs b/client/transaction-pool/graph/src/error.rs deleted file mode 100644 index 392ddaa39be6f..0000000000000 --- a/client/transaction-pool/graph/src/error.rs +++ /dev/null @@ -1,81 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Transaction pool errors. - -use sp_runtime::transaction_validity::{ - TransactionPriority as Priority, InvalidTransaction, UnknownTransaction, -}; - -/// Transaction pool result. -pub type Result = std::result::Result; - -/// Transaction pool error type. -#[derive(Debug, derive_more::Display, derive_more::From)] -pub enum Error { - /// Transaction is not verifiable yet, but might be in the future. - #[display(fmt="Unknown transaction validity: {:?}", _0)] - UnknownTransaction(UnknownTransaction), - /// Transaction is invalid. 
- #[display(fmt="Invalid transaction validity: {:?}", _0)] - InvalidTransaction(InvalidTransaction), - /// The transaction validity returned no "provides" tag. - /// - /// Such transactions are not accepted to the pool, since we use those tags - /// to define identity of transactions (occupance of the same "slot"). - #[display(fmt="The transaction does not provide any tags, so the pool can't identify it.")] - NoTagsProvided, - /// The transaction is temporarily banned. - #[display(fmt="Temporarily Banned")] - TemporarilyBanned, - /// The transaction is already in the pool. - #[display(fmt="[{:?}] Already imported", _0)] - AlreadyImported(Box), - /// The transaction cannot be imported cause it's a replacement and has too low priority. - #[display(fmt="Too low priority ({} > {})", old, new)] - TooLowPriority { - /// Transaction already in the pool. - old: Priority, - /// Transaction entering the pool. - new: Priority - }, - /// Deps cycle detected and we couldn't import transaction. - #[display(fmt="Cycle Detected")] - CycleDetected, - /// Transaction was dropped immediately after it got inserted. - #[display(fmt="Transaction couldn't enter the pool because of the limit.")] - ImmediatelyDropped, - /// Invalid block id. - InvalidBlockId(String), -} - -impl std::error::Error for Error {} - -/// Transaction pool error conversion. -pub trait IntoPoolError: ::std::error::Error + Send + Sized { - /// Try to extract original `Error` - /// - /// This implementation is optional and used only to - /// provide more descriptive error messages for end users - /// of RPC API. - fn into_pool_error(self) -> ::std::result::Result { Err(self) } -} - -impl IntoPoolError for Error { - fn into_pool_error(self) -> ::std::result::Result { Ok(self) } -} diff --git a/client/transaction-pool/graph/src/future.rs b/client/transaction-pool/graph/src/future.rs index 80e6825d4ff9d..98d49817e32a8 100644 --- a/client/transaction-pool/graph/src/future.rs +++ b/client/transaction-pool/graph/src/future.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/transaction-pool/graph/src/lib.rs b/client/transaction-pool/graph/src/lib.rs index bf220ce22973a..c61b05befa12f 100644 --- a/client/transaction-pool/graph/src/lib.rs +++ b/client/transaction-pool/graph/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -39,6 +39,6 @@ pub mod watcher; pub use self::base_pool::Transaction; pub use self::pool::{ - Pool, Options, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, - BlockHash, NumberFor, TransactionFor, ValidatedTransaction, + BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, IsValidator, NumberFor, Options, + Pool, TransactionFor, ValidatedTransaction, }; diff --git a/client/transaction-pool/graph/src/listener.rs b/client/transaction-pool/graph/src/listener.rs index 1bc3720fa6b85..d707c0a0f802f 100644 --- a/client/transaction-pool/graph/src/listener.rs +++ b/client/transaction-pool/graph/src/listener.rs @@ -1,7 +1,7 @@ // This file is part of Substrate. 
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/transaction-pool/graph/src/pool.rs b/client/transaction-pool/graph/src/pool.rs index 56ff550d7754f..eee14049d41a6 100644 --- a/client/transaction-pool/graph/src/pool.rs +++ b/client/transaction-pool/graph/src/pool.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -36,7 +36,7 @@ use wasm_timer::Instant; use futures::channel::mpsc::Receiver; use crate::validated_pool::ValidatedPool; -pub use crate::validated_pool::ValidatedTransaction; +pub use crate::validated_pool::{IsValidator, ValidatedTransaction}; /// Modification notification event stream type; pub type EventStream = Receiver; @@ -150,9 +150,9 @@ where impl Pool { /// Create a new transaction pool. - pub fn new(options: Options, api: Arc) -> Self { + pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { Pool { - validated_pool: Arc::new(ValidatedPool::new(options, api)), + validated_pool: Arc::new(ValidatedPool::new(options, is_validator, api)), } } @@ -497,43 +497,58 @@ mod tests { ) -> Self::ValidationFuture { let hash = self.hash_and_length(&uxt).0; let block_number = self.block_id_to_number(at).unwrap().unwrap(); - let nonce = uxt.transfer().nonce; - - // This is used to control the test flow. - if nonce > 0 { - let opt = self.delay.lock().take(); - if let Some(delay) = opt { - if delay.recv().is_err() { - println!("Error waiting for delay!"); - } - } - } - - if self.invalidate.lock().contains(&hash) { - return futures::future::ready(Ok(InvalidTransaction::Custom(0).into())); - } - futures::future::ready(if nonce < block_number { - Ok(InvalidTransaction::Stale.into()) - } else { - let mut transaction = ValidTransaction { - priority: 4, - requires: if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] }, - provides: if nonce == INVALID_NONCE { vec![] } else { vec![vec![nonce as u8]] }, - longevity: 3, - propagate: true, - }; - - if self.clear_requirements.lock().contains(&hash) { - transaction.requires.clear(); - } + let res = match uxt { + Extrinsic::Transfer { transfer, .. } => { + let nonce = transfer.nonce; + + // This is used to control the test flow. 
+ if nonce > 0 { + let opt = self.delay.lock().take(); + if let Some(delay) = opt { + if delay.recv().is_err() { + println!("Error waiting for delay!"); + } + } + } - if self.add_requirements.lock().contains(&hash) { - transaction.requires.push(vec![128]); - } + if self.invalidate.lock().contains(&hash) { + InvalidTransaction::Custom(0).into() + } else if nonce < block_number { + InvalidTransaction::Stale.into() + } else { + let mut transaction = ValidTransaction { + priority: 4, + requires: if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] }, + provides: if nonce == INVALID_NONCE { vec![] } else { vec![vec![nonce as u8]] }, + longevity: 3, + propagate: true, + }; + + if self.clear_requirements.lock().contains(&hash) { + transaction.requires.clear(); + } + + if self.add_requirements.lock().contains(&hash) { + transaction.requires.push(vec![128]); + } + + Ok(transaction) + } + }, + Extrinsic::IncludeData(_) => { + Ok(ValidTransaction { + priority: 9001, + requires: vec![], + provides: vec![vec![42]], + longevity: 9001, + propagate: false, + }) + }, + _ => unimplemented!(), + }; - Ok(Ok(transaction)) - }) + futures::future::ready(Ok(res)) } /// Returns a block number given the block id. @@ -579,7 +594,7 @@ mod tests { } fn pool() -> Pool { - Pool::new(Default::default(), TestApi::default().into()) + Pool::new(Default::default(), true.into(), TestApi::default().into()) } #[test] @@ -620,6 +635,26 @@ mod tests { assert_matches!(res.unwrap_err(), error::Error::TemporarilyBanned); } + #[test] + fn should_reject_unactionable_transactions() { + // given + let pool = Pool::new( + Default::default(), + // the node does not author blocks + false.into(), + TestApi::default().into(), + ); + + // after validation `IncludeData` will be set to non-propagable + let uxt = Extrinsic::IncludeData(vec![42]); + + // when + let res = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt)); + + // then + assert_matches!(res.unwrap_err(), error::Error::Unactionable); + } + #[test] fn should_notify_about_pool_events() { let (stream, hash0, hash1) = { @@ -722,11 +757,14 @@ mod tests { count: 100, total_bytes: 200, }; - let pool = Pool::new(Options { + + let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() - }, TestApi::default().into()); + }; + + let pool = Pool::new(options, true.into(), TestApi::default().into()); let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), @@ -757,11 +795,14 @@ mod tests { count: 100, total_bytes: 10, }; - let pool = Pool::new(Options { + + let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() - }, TestApi::default().into()); + }; + + let pool = Pool::new(options, true.into(), TestApi::default().into()); // when block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { @@ -939,11 +980,13 @@ mod tests { count: 1, total_bytes: 1000, }; - let pool = Pool::new(Options { + let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() - }, TestApi::default().into()); + }; + + let pool = Pool::new(options, true.into(), TestApi::default().into()); let xt = uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), @@ -977,7 +1020,7 @@ mod tests { let (tx, rx) = std::sync::mpsc::sync_channel(1); let mut api = TestApi::default(); api.delay = Arc::new(Mutex::new(rx.into())); - let pool = Arc::new(Pool::new(Default::default(), api.into())); + let pool = 
Arc::new(Pool::new(Default::default(), true.into(), api.into())); // when let xt = uxt(Transfer { diff --git a/client/transaction-pool/graph/src/ready.rs b/client/transaction-pool/graph/src/ready.rs index cbdb25078931e..c2af4f9cb9140 100644 --- a/client/transaction-pool/graph/src/ready.rs +++ b/client/transaction-pool/graph/src/ready.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/transaction-pool/graph/src/rotator.rs b/client/transaction-pool/graph/src/rotator.rs index 65e21d0d4b506..3d9b359fd365f 100644 --- a/client/transaction-pool/graph/src/rotator.rs +++ b/client/transaction-pool/graph/src/rotator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/transaction-pool/graph/src/tracked_map.rs b/client/transaction-pool/graph/src/tracked_map.rs index c799eb0b96ea1..9cd6ad84b483d 100644 --- a/client/transaction-pool/graph/src/tracked_map.rs +++ b/client/transaction-pool/graph/src/tracked_map.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/transaction-pool/graph/src/validated_pool.rs b/client/transaction-pool/graph/src/validated_pool.rs index 86c2e75832f07..c02aab47d8808 100644 --- a/client/transaction-pool/graph/src/validated_pool.rs +++ b/client/transaction-pool/graph/src/validated_pool.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -90,9 +90,25 @@ pub type ValidatedTransactionFor = ValidatedTransaction< ::Error, >; +/// A closure that returns true if the local node is a validator that can author blocks. +pub struct IsValidator(Box bool + Send + Sync>); + +impl From for IsValidator { + fn from(is_validator: bool) -> Self { + IsValidator(Box::new(move || is_validator)) + } +} + +impl From bool + Send + Sync>> for IsValidator { + fn from(is_validator: Box bool + Send + Sync>) -> Self { + IsValidator(is_validator) + } +} + /// Pool that deals with validated transactions. pub struct ValidatedPool { api: Arc, + is_validator: IsValidator, options: Options, listener: RwLock, B>>, pool: RwLock ValidatedPool { /// Create a new transaction pool. 
- pub fn new(options: Options, api: Arc) -> Self { + pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { let base_pool = base::BasePool::new(options.reject_future_transactions); ValidatedPool { + is_validator, options, listener: Default::default(), api, @@ -183,6 +200,10 @@ impl ValidatedPool { fn submit_one(&self, tx: ValidatedTransactionFor) -> Result, B::Error> { match tx { ValidatedTransaction::Valid(tx) => { + if !tx.propagate && !(self.is_validator.0)() { + return Err(error::Error::Unactionable.into()); + } + let imported = self.pool.write().import(tx)?; if let base::Imported::Ready { ref hash, .. } = imported { @@ -286,7 +307,7 @@ impl ValidatedPool { /// Transactions that are missing from the pool are not submitted. pub fn resubmit(&self, mut updated_transactions: HashMap, ValidatedTransactionFor>) { #[derive(Debug, Clone, Copy, PartialEq)] - enum Status { Future, Ready, Failed, Dropped }; + enum Status { Future, Ready, Failed, Dropped } let (mut initial_statuses, final_statuses) = { let mut pool = self.pool.write(); diff --git a/client/transaction-pool/graph/src/watcher.rs b/client/transaction-pool/graph/src/watcher.rs index 9d9a91bb23f69..6f8eb7c6e5668 100644 --- a/client/transaction-pool/graph/src/watcher.rs +++ b/client/transaction-pool/graph/src/watcher.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 853b66f6e74bb..fc14a5a0cba64 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/transaction-pool/src/error.rs b/client/transaction-pool/src/error.rs index c0f795df1801a..62c812d14704a 100644 --- a/client/transaction-pool/src/error.rs +++ b/client/transaction-pool/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -24,30 +24,22 @@ use sp_transaction_pool::error::Error as TxPoolError; pub type Result = std::result::Result; /// Transaction pool error type. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum Error { - /// Pool error. - Pool(TxPoolError), - /// Blockchain error. - Blockchain(sp_blockchain::Error), - /// Error while converting a `BlockId`. - #[from(ignore)] + #[error("Transaction pool error")] + Pool(#[from] TxPoolError), + + #[error("Blockchain error")] + Blockchain(#[from] sp_blockchain::Error), + + #[error("Block conversion error: {0}")] BlockIdConversion(String), - /// Error while calling the runtime api. 
- #[from(ignore)] + + #[error("Runtime error: {0}")] RuntimeApi(String), } -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Pool(ref err) => Some(err), - Error::Blockchain(ref err) => Some(err), - Error::BlockIdConversion(_) => None, - Error::RuntimeApi(_) => None, - } - } -} impl sp_transaction_pool::error::IntoPoolError for Error { fn into_pool_error(self) -> std::result::Result { diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index e03c01bd1d816..32525065b9798 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -163,7 +163,7 @@ impl BasicPool pub fn new_test( pool_api: Arc, ) -> (Self, Pin + Send>>, intervalier::BackSignalControl) { - let pool = Arc::new(sc_transaction_graph::Pool::new(Default::default(), pool_api.clone())); + let pool = Arc::new(sc_transaction_graph::Pool::new(Default::default(), true.into(), pool_api.clone())); let (revalidation_queue, background_task, notifier) = revalidation::RevalidationQueue::new_test(pool_api.clone(), pool.clone()); ( @@ -184,12 +184,13 @@ impl BasicPool /// revalidation type. pub fn with_revalidation_type( options: sc_transaction_graph::Options, + is_validator: txpool::IsValidator, pool_api: Arc, prometheus: Option<&PrometheusRegistry>, revalidation_type: RevalidationType, spawner: impl SpawnNamed, ) -> Self { - let pool = Arc::new(sc_transaction_graph::Pool::new(options, pool_api.clone())); + let pool = Arc::new(sc_transaction_graph::Pool::new(options, is_validator, pool_api.clone())); let (revalidation_queue, background_task) = match revalidation_type { RevalidationType::Light => (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), RevalidationType::Full => { @@ -346,7 +347,7 @@ where ) -> Self { let pool_api = Arc::new(LightChainApi::new(client, fetcher)); Self::with_revalidation_type( - options, pool_api, prometheus, RevalidationType::Light, spawner, + options, false.into(), pool_api, prometheus, RevalidationType::Light, spawner, ) } } @@ -364,13 +365,14 @@ where /// Create new basic transaction pool for a full node with the provided api. pub fn new_full( options: sc_transaction_graph::Options, + is_validator: txpool::IsValidator, prometheus: Option<&PrometheusRegistry>, spawner: impl SpawnNamed, client: Arc, ) -> Arc { let pool_api = Arc::new(FullChainApi::new(client.clone(), prometheus)); let pool = Arc::new(Self::with_revalidation_type( - options, pool_api, prometheus, RevalidationType::Full, spawner + options, is_validator, pool_api, prometheus, RevalidationType::Full, spawner )); // make transaction pool available for off-chain runtime calls. diff --git a/client/transaction-pool/src/metrics.rs b/client/transaction-pool/src/metrics.rs index 376e6dfe94488..e0b70183a86b2 100644 --- a/client/transaction-pool/src/metrics.rs +++ b/client/transaction-pool/src/metrics.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/transaction-pool/src/revalidation.rs b/client/transaction-pool/src/revalidation.rs index 7be8688eaea5d..fc18b0694d6ee 100644 --- a/client/transaction-pool/src/revalidation.rs +++ b/client/transaction-pool/src/revalidation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -370,7 +370,7 @@ mod tests { fn setup() -> (Arc, Pool) { let test_api = Arc::new(TestApi::empty()); - let pool = Pool::new(Default::default(), test_api.clone()); + let pool = Pool::new(Default::default(), true.into(), test_api.clone()); (test_api, pool) } diff --git a/client/transaction-pool/src/testing/mod.rs b/client/transaction-pool/src/testing/mod.rs index 350c4137c37b2..9c7f1dfd7f336 100644 --- a/client/transaction-pool/src/testing/mod.rs +++ b/client/transaction-pool/src/testing/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index 8fa742cd419a3..a41632ed8de88 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -37,7 +37,7 @@ use sc_block_builder::BlockBuilderProvider; use sp_consensus::BlockOrigin; fn pool() -> Pool { - Pool::new(Default::default(), TestApi::with_alice_nonce(209).into()) + Pool::new(Default::default(), true.into(), TestApi::with_alice_nonce(209).into()) } fn maintained_pool() -> ( @@ -161,7 +161,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { api.set_valid_modifier(Box::new(|v: &mut ValidTransaction| { v.provides.push(vec![155]); })); - let pool = Pool::new(Default::default(), api.clone()); + let pool = Pool::new(Default::default(), true.into(), api.clone()); let xt = uxt(Alice, 209); block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.validated_pool().status().ready, 1); diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 1582eee5d9265..a918ef5d554ca 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -6,6 +6,159 @@ The format is based on [Keep a Changelog]. ## Unreleased +## 2.0.1-> 3.0.0 - Apollo 14 + +Most notably, this is the first release of the new FRAME (2.0) with its new macro-syntax and some changes in types, and pallet versioning. This release also incorporates the faster and improve version 2.0 of the parity-scale-codec and upgraded dependencies all-around. 
While the `FinalityTracker` pallet has been dropped, this release marks the first public appearance of a few new pallets, too: Bounties, Lottery, Tips (extracted from the `Treasury`-pallet, see #7536) and Merkle-Mountain-Ranges (MMR). + +On the client side, the most notable changes are around the keystore, making it async and switching to a different signing model allowing for remote-signing to be implemented; and various changes to improve networking and light-client support, like adding the Grandpa warp sync request-response protocol (#7711). + +_Contracts_: Please note that the contracts pallet _is not part_ of this release. The pallet is not yet ready and will be released separately in the coming weeks. The currently released contracts pallet _is not compatible_ with the new FRAME, thus if you need the contracts pallet, we recommend you wait with the upgrade until it has been released, too. +### Upgrade instructions + +Not too much has changed on the top and API level for developing Substrate between 2.0 and 3.0. The easiest and quickest path for upgrading is just to take the latest node-template and try applying your changes to it: +1. take a diff between 2.0 and your changes +2. store that diff +3. remove everything, copy over the 3.0 node-template +4. try re-applying your diff, manually, a hunk at a time. + +If that doesn't work for you, we are working on an in-depth guide for all major changes that took place and how you need to adapt your code for it. [You can find the upgrade guide under `docs/` in the repo](https://github.com/paritytech/substrate/blob/master/docs/Upgrading-2.0-to-3.0.md), if you have further questions or problems, please [feel free to ask in the github discussion board](https://github.com/paritytech/substrate/discussions). + + +Runtime +------- + +* contracts: Charge rent for code storage (#7935) +* contracts: Emit event on contract termination (#8014) +* Fix elections-phragmen and proxy issue (#7040) +* Allow validators to block and kick their nominator set. (#7930) +* Decouple Staking and Election - Part1: Support traits (#7908) +* Introduces account existence providers reference counting (#7363) +* contracts: Cap the surcharge reward by the amount of rent that was paid by a contract (#7870) +* Use checked math when calculating storage size (#7885) +* Fix clear prefix check to avoid erasing child trie roots.
(#7848) +* contracts: Collect rent for the first block during deployment (#7847) +* contracts: Add configurable per-storage item cost (#7819) +* babe: expose next epoch data (#7829) +* fix : remove `_{ }` syntax from benchmark macro (#7822) +* Define ss58 prefix inside the runtime (#7810) +* Allow council to slash treasury tip (#7753) +* Don't allow self proxies (#7803) +* add a `current_epoch` to BabeApi (#7789) +* Add `pallet` attribute macro to declare pallets (#6877) +* Make it possible to calculate the storage root as often as you want (#7714) +* Issue 7143 | Refactor Treasury Pallet into Bounties, Tips, and Proposals (#7536) +* Participating in Council Governance is Free for First Time Voters and Successful Closing (#7661) +* Streamline frame_system weight parametrization (#6629) +* Features needed for reserve-backed stablecoins (#7152) +* `sudo_as` should return a result (#7620) +* More Extensible Multiaddress Format (#7380) +* Fix `on_runtime_upgrade` weight recording (#7480) +* Implement batch_all and update Utility pallet for weight refunds (#7188) +* Fix wrong outgoing calculation in election (#7384) +* Implements pallet versioning (#7208) +* Runtime worker threads (#7089) +* Allow `schedule_after(0, ...)` to work (#7284) +* Fix offchain election to respect the weight (#7215) +* Fix weight for inner call with new origin (#7196) +* Move proxies migration (#7205) +* Introduce `cancel_proposal` to rid us of those pesky proposals (#7111) + +Client +------ + +* Remove backwards-compatibility networking hack (#8068) +* Extend SS58 network identifiers (#8039) +* Update dependencies ahead of next release (#8015) +* Storage chains: serve transactions over IPFS/bitswap (#7963) +* Add a send_request function to NetworkService (#8008) +* Rename system_networkState to system_unstable_networkState (#8001) +* Allow transaction for offchain indexing (#7290) +* Grandpa warp sync request-response protocol (#7711) +* Add explicit limits to notifications sizes and adjust yamux buffer size (#7925) +* Rework priority groups, take 2 (#7700) +* Define ss58 prefix inside the runtime (#7810) +* Expand remote keystore interface to allow for hybrid mode (#7628) +* Allow capping the amount of work performed when deleting a child trie (#7671) +* RPC to allow setting the log filter (#7474) +* Remove sc_network::NetworkService::register_notifications_protocol and partially refactor Grandpa tests (#7646) +* minor fix and improvements on localkeystore (#7626) +* contracts: Add `salt` argument to contract instantiation (#7482) +* contracts: Rework contracts_call RPC (#7468) +* Make sure to use the optimized method instead of reading the storage. 
(#7445) +* WASM Local-blob override (#7317) +* client/network: Allow configuring Kademlia's disjoint query paths (#7356) +* client/network: Remove option to disable yamux flow control (#7358) +* Make `queryStorage` and `storagePairs` unsafe RPC functions (#7342) +* No longer actively open legacy substreams (#7076) +* Make `run_node_until_exit` take a future (#7318) +* Add an system_syncState RPC method (#7315) +* Async keystore + Authority-Discovery async/await (#7000) +* Fixes logging of target names with dashes (#7281) +* Refactor CurrencyToVote (#6896) +* client/network: Stop sending noise legacy handshake (#7211) + +API +--- + +* pallet macro: easier syntax for `#[pallet::pallet]` with `struct Pallet(_)` (#8091) +* WasmExecutor takes a cache directory (#8057) +* Remove PalletInfo impl for () (#8090) +* Migrate assets pallet to new macros (#7984) +* contracts: Make ChainExtension trait generic over the runtime (#8003) +* Decouple the session validators from im-online (#7127) +* Update parity-scale-codec to 2.0 (#7994) +* Merkle Mountain Range pallet improvements (#7891) +* Cleaner GRANDPA RPC API for proving finality (#7339) +* Migrate frame-system to pallet attribute macro (#7898) +* Introduces account existence providers reference counting (#7363) +* contracts: Lazy storage removal (#7740) +* contracts: Allow runtime authors to define a chain extension (#7548) +* Define ss58 prefix inside the runtime (#7810) +* Add `pallet` attribute macro to declare pallets (#6877) +* Add keccak-512 to host functions. (#7531) +* Merkle Mountain Range pallet (#7312) +* Allow capping the amount of work performed when deleting a child trie (#7671) +* add an upgrade_keys method for pallet-session (#7688) +* Streamline frame_system weight parametrization (#6629) +* Rename pallet trait `Trait` to `Config` (#7599) +* contracts: Add `salt` argument to contract instantiation (#7482) +* pallet-evm: move to Frontier (Part IV) (#7573) +* refactor subtrait/elevated trait as not needed (#7497) +* Allow BabeConsensusDataProvider fork existing chain (#7078) +* decouple transaction payment and currency (#6912) +* contracts: Refactor the runtime API in order to simplify node integration (#7409) +* client/authority-discovery: Remove sentry node logic (#7368) +* client/network: Make NetworkService::set_priority_group async (#7352) +* *: Bump async-std to v1.6.5 (#7306) +* babe: make secondary slot randomness available on-chain (#7053) +* allow where clause in decl_error (#7324) +* reschedule (#6860) +* SystemOrigin trait (#7226) +* permit setting treasury pallet initial funding through genesis (#7214) + +Runtime Migrations +------------------ + +* Migrate assets pallet to new macros (#7984) +* Fix elections-phragmen and proxy issue (#7040) +* Allow validators to block and kick their nominator set. (#7930) +* Migrate frame-system to pallet attribute macro (#7898) +* Implements pallet versioning (#7208) +* Move proxies migration (#7205) + + +## 2.0.0-> 2.0.1 + +Patch release with backports to fix broken nightly builds. 
+Namely contains backports of + +* [#7381: Make Substrate compile with latest nightly](https://github.com/paritytech/substrate/pull/7381) +* [#7238: Fix compilation with environmental on latest nightly](https://github.com/paritytech/substrate/pull/7238) +* [#7395: Make benchmarks compile with latest nightly](https://github.com/paritytech/substrate/pull/7395) +* [#7838: Fix incorrect use of syn::exports](https://github.com/paritytech/substrate/pull/7838) (partially) +* [#7854: Update to futures 0.3.9](https://github.com/paritytech/substrate/pull/7854) + + ## 2.0.0-rc6 -> 2.0.0 – two dot 😮 Runtime diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index b195d5c657065..a3837e1677865 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -45,9 +45,6 @@ # Contracts /frame/contracts/ @athei -# EVM -/frame/evm/ @sorpaas - # NPoS and election /frame/staking/ @kianenigma /frame/elections/ @kianenigma diff --git a/docs/CONTRIBUTING.adoc b/docs/CONTRIBUTING.adoc index 3e1ca7f5a3269..6262ed9086a5e 100644 --- a/docs/CONTRIBUTING.adoc +++ b/docs/CONTRIBUTING.adoc @@ -10,11 +10,11 @@ Individuals making significant and valuable contributions are given commit-acces There are a few basic ground-rules for contributors (including the maintainer(s) of the project): -. **No `--force` pushes** or modifying the master branch history in any way. If you need to rebase, ensure you do it in your own repo. +. **No `--force` pushes** or modifying the master branch history in any way. If you need to rebase, ensure you do it in your own repo. No rewriting of the history after the code has been shared (e.g. through a Pull-Request). . **Non-master branches**, prefixed with a short name moniker (e.g. `gav-my-feature`) must be used for ongoing work. . **All modifications** must be made in a **pull-request** to solicit feedback from other contributors. . A pull-request *must not be merged until CI* has finished successfully. -. Contributors should adhere to the ./STYLE_GUIDE.md[house coding style]. +. Contributors should adhere to the link:STYLE_GUIDE.md[house coding style]. == Merge Process diff --git a/docs/PULL_REQUEST_TEMPLATE.md b/docs/PULL_REQUEST_TEMPLATE.md index 8ca6ba9b01fe2..77f5f79f60d40 100644 --- a/docs/PULL_REQUEST_TEMPLATE.md +++ b/docs/PULL_REQUEST_TEMPLATE.md @@ -14,7 +14,7 @@ Before you submitting, please check that: - [ ] Github's project assignment - [ ] You mentioned a related issue if this PR related to it, e.g. `Fixes #228` or `Related #1337`. - [ ] You asked any particular reviewers to review. If you aren't sure, start with GH suggestions. -- [ ] Your PR adheres to [the style guide](https://wiki.parity.io/Substrate-Style-Guide) +- [ ] Your PR adheres to [the style guide](https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md) - In particular, mind the maximal line length of 100 (120 in exceptional circumstances). - There is no commented code checked in unless necessary. - Any panickers have a proof or removed. diff --git a/docs/README.adoc b/docs/README.adoc index 7f3d50faac7d6..71052420b1aa9 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -308,28 +308,6 @@ cargo run --release \-- \ Additional Substrate CLI usage options are available and may be shown by running `cargo run \-- --help`. -=== WASM binaries - -The WASM binaries are built during the normal `cargo build` process. To control the WASM binary building, -we support multiple environment variables: - -* `SKIP_WASM_BUILD` - Skips building any WASM binary. This is useful when only native should be recompiled. 
-* `BUILD_DUMMY_WASM_BINARY` - Builds dummy WASM binaries. These dummy binaries are empty and useful - for `cargo check` runs. -* `WASM_BUILD_TYPE` - Sets the build type for building WASM binaries. Supported values are `release` or `debug`. - By default the build type is equal to the build type used by the main build. -* `FORCE_WASM_BUILD` - Can be set to force a WASM build. On subsequent calls the value of the variable - needs to change. As WASM builder instructs `cargo` to watch for file changes - this environment variable should only be required in certain circumstances. -* `WASM_TARGET_DIRECTORY` - Will copy release build WASM binary to the given directory. The path needs - to be absolute. -* `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. -* `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. - -Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. -Where `PROJECT_NAME` needs to be replaced by the name of the cargo project, e.g. `node-runtime` will -be `NODE_RUNTIME`. - [[flaming-fir]] === Joining the Flaming Fir Testnet diff --git a/docs/SECURITY.md b/docs/SECURITY.md index 7240218fa8729..19f5b145feb5e 100644 --- a/docs/SECURITY.md +++ b/docs/SECURITY.md @@ -1,3 +1,4 @@ + # Security Policy Parity Technologies is committed to resolving security vulnerabilities in our software quickly and carefully. We take the necessary steps to minimize risk, provide timely information, and deliver vulnerability fixes and mitigations required to address security issues. diff --git a/docs/Structure.adoc b/docs/Structure.adoc index c8cd63506a347..6c810a83c51b9 100644 --- a/docs/Structure.adoc +++ b/docs/Structure.adoc @@ -33,7 +33,7 @@ In the lowest level, Substrate defines primitives, interfaces and traits to impl === Client * _found in_: `/client` -* _crates prefix_: `substrate-` +* _crates prefix_: `sc-` * _constraints_: ** crates may not (dev-)depend on any `frame-`-crates diff --git a/docs/Upgrading-2.0-to-3.0.md b/docs/Upgrading-2.0-to-3.0.md new file mode 100644 index 0000000000000..bc4a15eb15f27 --- /dev/null +++ b/docs/Upgrading-2.0-to-3.0.md @@ -0,0 +1,1120 @@ +# Upgrading from Substrate 2.0 to 3.0 + +An incomplete guide. + +## Refreshing the node-template + +Not much has changed on the top and API level for developing Substrate between 2.0 and 3.0. If you've made only small changes to the node-template, we recommend doing the following - it is the easiest and quickest path forward: +1. take a diff between 2.0 and your changes +2. store that diff +3. remove everything, copy over the 3.0 node-template +4. try re-applying your diff, manually, a hunk at a time. + +## In-Depth guide on the changes + +If you've made significant changes or diverged from the node-template a lot, starting out with that is probably not going to help. For that case, we'll take a look at all the changes between 2.0 and 3.0 to the fully-implemented node and explain them one by one, so you can follow what needs to change for your node. + +_Note_: Of course, step 1 is to upgrade your `Cargo.toml`'s to use the latest version of Substrate and all dependencies. + +We'll be taking the diff from 2.0.1 to 3.0.0 on `bin/node` as the baseline of what has changed between these two versions in terms of adapting one's code base. We will not be covering the changes made to the tests and benchmarking, as they are mostly reactions to the other changes.
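Before diving into the individual changes, it may help to have the target shape in mind. Below is a minimal, illustrative sketch of what a pallet looks like with the new FRAME 2.0 attribute macros that the following sections migrate towards. It is not taken from this diff; the pallet contents (`Something`, `SomethingStored`, `store_something`) are placeholders, and the exact attribute set your own pallet needs may differ:

```rust
pub use pallet::*;

#[frame_support::pallet]
pub mod pallet {
	use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*};
	use frame_system::{ensure_signed, pallet_prelude::*};

	/// The pallet's configuration trait -- note `Config` instead of the old `Trait`.
	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// The overarching event type.
		type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
	}

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}

	/// A plain storage value, replacing what `decl_storage!` used to declare.
	#[pallet::storage]
	pub type Something<T> = StorageValue<_, u32>;

	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// A value was stored by the given account.
		SomethingStored(u32, T::AccountId),
	}

	/// Dispatchable calls, replacing what `decl_module!` used to declare.
	#[pallet::call]
	impl<T: Config> Pallet<T> {
		#[pallet::weight(10_000)]
		pub fn store_something(origin: OriginFor<T>, value: u32) -> DispatchResultWithPostInfo {
			let who = ensure_signed(origin)?;
			Something::<T>::put(value);
			Self::deposit_event(Event::SomethingStored(value, who));
			Ok(().into())
		}
	}
}
```

Such a pallet is then wired into the runtime through an `impl $pallet::Config for Runtime` block, which is exactly the kind of code the sections below walk through.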
+ +### Versions upgrade + +First and foremost you have to upgrade the versions of the dependencies of course, that's `0.8.x -> 0.9.0` and `2.0.x -> 3.0.0` for all `sc-`, `sp-`, `frame-`, and `pallet-` crates coming from Parity. Furthermore, this release also upgraded its own dependencies, most notably, we are now using `parity-scale-codec 2.0`, `parking_lot 0.11` and `substrate-wasm-builder 3.0.0` (as build dependency). All other dependency upgrades should resolve automatically or are just internal. However, you might see an error that a dependency/type you have and one of our upgraded crates don't match up; if so, please check the version of said dependency - we've probably upgraded it. + +### WASM-Builder + +The new version of wasm-builder has gotten a bit smarter and a lot faster (you should definitely switch). Once you've upgraded the dependency, in most cases you just have to remove the now obsolete `with_wasm_builder_from_crates_or_path`-function and you are good to go: + +```diff: rust +--- a/bin/node/runtime/build.rs ++++ b/bin/node/runtime/build.rs +@@ -15,12 +15,11 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-use wasm_builder_runner::WasmBuilder; ++use substrate_wasm_builder::WasmBuilder; + + fn main() { + WasmBuilder::new() + .with_current_project() +- .with_wasm_builder_from_crates_or_path("2.0.0", "../../../utils/wasm-builder") + .export_heap_base() + .import_memory() + .build() +``` + +### Runtime + +#### FRAME 2.0 + +The new FRAME 2.0 macros are a lot nicer to use and easier to read. While we were on that change though, we also cleaned up some mainly internal names and traits. The old macros still work and also produce the new structure, however, when plugging all that together as a Runtime, there are some things we have to adapt now: + +##### `::Trait for Runtime` becomes `::Config for Runtime` + +The most visible and significant change is that the macros no longer generate the `$pallet::Trait` but now a much more aptly named `$pallet::Config`. Thus, we need to rename all `::Trait for Runtime` into `::Config for Runtime`, e.g. for the `sudo` pallet we must do: + +```diff +-impl pallet_sudo::Trait for Runtime { ++impl pallet_sudo::Config for Runtime { +``` + +The same goes for all `<T as $pallet::Trait>`-style references and alike, which simply become `<T as $pallet::Config>`. + +#### SS58 Prefix is now a runtime param + + +Since [#7810](https://github.com/paritytech/substrate/pull/7810) we don't define the ss58 prefix in the chainspec anymore but moved it into the runtime. Namely, `frame_system` now needs a new `SS58Prefix`, which in substrate node we have defined for ourselves as: `pub const SS58Prefix: u8 = 42;`. Use your own chain-specific value there. + +#### Weight Definition + +`type WeightInfo` has changed: instead of `weights::pallet_$name::WeightInfo` it is now bound to the Runtime as `pallet_$name::weights::SubstrateWeight`. As a result we have to change the type definitions everywhere in our Runtime accordingly: + +```diff +- type WeightInfo = weights::pallet_$name::WeightInfo; ++ type WeightInfo = pallet_$name::weights::SubstrateWeight; +``` + +e.g.
+```diff +- type WeightInfo = weights::pallet_collective::WeightInfo; ++ type WeightInfo = pallet_collective::weights::SubstrateWeight; +``` +and + +```diff +- type WeightInfo = weights::pallet_proxy::WeightInfo; ++ type WeightInfo = pallet_proxy::weights::SubstrateWeight; +``` + +And update the overall definition for weights on frame and a few related types and runtime parameters: + +```diff= + +-const AVERAGE_ON_INITIALIZE_WEIGHT: Perbill = Perbill::from_percent(10); ++/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. ++/// This is used to limit the maximal weight of a single extrinsic. ++const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); ++/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used ++/// by Operational extrinsics. ++const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); ++/// We allow for 2 seconds of compute with a 6 second average block time. ++const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; ++ + parameter_types! { + pub const BlockHashCount: BlockNumber = 2400; +- /// We allow for 2 seconds of compute with a 6 second average block time. +- pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; +- pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); +- /// Assume 10% of weight for average on_initialize calls. +- pub MaximumExtrinsicWeight: Weight = +- AvailableBlockRatio::get().saturating_sub(AVERAGE_ON_INITIALIZE_WEIGHT) +- * MaximumBlockWeight::get(); +- pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; + pub const Version: RuntimeVersion = VERSION; +-} +- +-const_assert!(AvailableBlockRatio::get().deconstruct() >= AVERAGE_ON_INITIALIZE_WEIGHT.deconstruct()); +- +-impl frame_system::Trait for Runtime { ++ pub RuntimeBlockLength: BlockLength = ++ BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); ++ pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() ++ .base_block(BlockExecutionWeight::get()) ++ .for_class(DispatchClass::all(), |weights| { ++ weights.base_extrinsic = ExtrinsicBaseWeight::get(); ++ }) ++ .for_class(DispatchClass::Normal, |weights| { ++ weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); ++ }) ++ .for_class(DispatchClass::Operational, |weights| { ++ weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); ++ // Operational transactions have some extra reserved space, so that they ++ // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. 
++ weights.reserved = Some( ++ MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT ++ ); ++ }) ++ .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) ++ .build_or_panic(); ++} ++ ++const_assert!(NORMAL_DISPATCH_RATIO.deconstruct() >= AVERAGE_ON_INITIALIZE_RATIO.deconstruct()); ++ ++impl frame_system::Config for Runtime { + type BaseCallFilter = (); ++ type BlockWeights = RuntimeBlockWeights; ++ type BlockLength = RuntimeBlockLength; ++ type DbWeight = RocksDbWeight; + type Origin = Origin; + type Call = Call; + type Index = Index; +@@ -171,25 +198,19 @@ impl frame_system::Trait for Runtime { + type Header = generic::Header; + type Event = Event; + type BlockHashCount = BlockHashCount; +- type MaximumBlockWeight = MaximumBlockWeight; +- type DbWeight = RocksDbWeight; +- type BlockExecutionWeight = BlockExecutionWeight; +- type ExtrinsicBaseWeight = ExtrinsicBaseWeight; +- type MaximumExtrinsicWeight = MaximumExtrinsicWeight; +- type MaximumBlockLength = MaximumBlockLength; +- type AvailableBlockRatio = AvailableBlockRatio; + type Version = Version; + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); +- type SystemWeightInfo = weights::frame_system::WeightInfo; ++ type SystemWeightInfo = frame_system::weights::SubstrateWeight; +``` + +#### Pallets: + +##### Assets + +The assets pallet has seen a variety of changes: +- [Features needed for reserve-backed stablecoins #7152 ](https://github.com/paritytech/substrate/pull/7152) +- [Freeze Assets and Asset Metadata #7346 ](https://github.com/paritytech/substrate/pull/7346) +- [Introduces account existence providers reference counting #7363 ](https://github.com/paritytech/substrate/pull/7363) + +have all altered the feature set and changed the concepts. However, it has some of the best documentation and explains the current state very well. If you are using the assets pallet and need to upgrade from an earlier version, we recommend you use the current docs to guide your way! + +##### Contracts + +As noted in the changelog, the `contracts`-pallet is still undergoing massive changes and is not yet part of this release. We expect it to be released a few weeks later. If your chain is dependent on this pallet, we recommend waiting until it has been released, as the currently released version is not compatible with FRAME 2.0. + +#### (changes) Treasury + +As mentioned above, Bounties, Tips and Lottery have been extracted out of treasury into their own pallets - removing these options here. Secondly, we must now specify the `BurnDestination` and `SpendFunds`, which now go to `Bounties`.
+ +```diff +- type Tippers = Elections; +- type TipCountdown = TipCountdown; +- type TipFindersFee = TipFindersFee; +- type TipReportDepositBase = TipReportDepositBase; +- type DataDepositPerByte = DataDepositPerByte; + type Event = Event; + type OnSlash = (); + type ProposalBond = ProposalBond; + type ProposalBondMinimum = ProposalBondMinimum; + type SpendPeriod = SpendPeriod; + type Burn = Burn; ++ type BurnDestination = (); ++ type SpendFunds = Bounties; +``` + +Factoring out Bounties and Tips means most of these definitions have now moved there, while the parameter types can be left as they were: + +###### 🆕 Bounties + +```rust= +impl pallet_bounties::Config for Runtime { + type Event = Event; + type BountyDepositBase = BountyDepositBase; + type BountyDepositPayoutDelay = BountyDepositPayoutDelay; + type BountyUpdatePeriod = BountyUpdatePeriod; + type BountyCuratorDeposit = BountyCuratorDeposit; + type BountyValueMinimum = BountyValueMinimum; + type DataDepositPerByte = DataDepositPerByte; + type MaximumReasonLength = MaximumReasonLength; + type WeightInfo = pallet_bounties::weights::SubstrateWeight; + } +``` + +###### 🆕 Tips + +```rust= +impl pallet_tips::Config for Runtime { + type Event = Event; + type DataDepositPerByte = DataDepositPerByte; + type MaximumReasonLength = MaximumReasonLength; + type Tippers = Elections; + type TipCountdown = TipCountdown; + type TipFindersFee = TipFindersFee; + type TipReportDepositBase = TipReportDepositBase; + type WeightInfo = pallet_tips::weights::SubstrateWeight; + } +``` + +#### `FinalityTracker` removed + +Finality Tracker has been removed in favor of a different approach to handle the issue in GRANDPA, [see #7228 for details](https://github.com/paritytech/substrate/pull/7228). With the latest GRANDPA this is not needed anymore and can be removed without worry. + +#### (changes) Elections Phragmen + +The pallet has been moved to a new system in which the exact amount of deposit for each voter, candidate, member, or runner-up is now deposited on-chain. Moreover, the concept of a `defunct_voter` is removed, since votes now have an adequate deposit associated with them. A number of configuration parameters have changed to reflect this, as shown below: + +```diff= + parameter_types! { + pub const CandidacyBond: Balance = 10 * DOLLARS; +- pub const VotingBond: Balance = 1 * DOLLARS; ++ // 1 storage item created, key size is 32 bytes, value size is 16+16. ++ pub const VotingBondBase: Balance = deposit(1, 64); ++ // additional data per vote is 32 bytes (account id). ++ pub const VotingBondFactor: Balance = deposit(0, 32); + pub const TermDuration: BlockNumber = 7 * DAYS; + pub const DesiredMembers: u32 = 13; + pub const DesiredRunnersUp: u32 = 7; + +@@ -559,16 +600,16 @@ impl pallet_elections_phragmen::Trait for Runtime { + // NOTE: this implies that council's genesis members cannot be set directly and must come from + // this module.
+ type InitializeMembers = Council; +- type CurrencyToVote = CurrencyToVoteHandler; ++ type CurrencyToVote = U128CurrencyToVote; + type CandidacyBond = CandidacyBond; +- type VotingBond = VotingBond; ++ type VotingBondBase = VotingBondBase; ++ type VotingBondFactor = VotingBondFactor; + type LoserCandidate = (); +- type BadReport = (); + type KickedMember = (); + type DesiredMembers = DesiredMembers; + type DesiredRunnersUp = DesiredRunnersUp; + type TermDuration = TermDuration; + ``` + + **This upgrade requires storage [migration](https://github.com/paritytech/substrate/blob/master/frame/elections-phragmen/src/migrations_3_0_0.rs)**. Further details can be found in the [pallet-specific changelog](https://github.com/paritytech/substrate/blob/master/frame/elections-phragmen/CHANGELOG.md#security). + +#### (changes) Democracy + +Democracy brings three new settings with this release, all to allow for better influx- and spam-control. Namely these allow to specify the maximum number of proposals at a time, who can blacklist and who can cancel proposals. This diff acts as a good starting point: + +```diff= +@@ -508,6 +537,14 @@ impl pallet_democracy::Trait for Runtime { + type FastTrackVotingPeriod = FastTrackVotingPeriod; + // To cancel a proposal which has been passed, 2/3 of the council must agree to it. + type CancellationOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; ++ // To cancel a proposal before it has been passed, the technical committee must be unanimous or ++ // Root must agree. ++ type CancelProposalOrigin = EnsureOneOf< ++ AccountId, ++ EnsureRoot, ++ pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>, ++ >; ++ type BlacklistOrigin = EnsureRoot; + // Any single technical committee member may veto a coming council proposal, however they can + // only do it once and it lasts only for the cooloff period. + type VetoOrigin = pallet_collective::EnsureMember; +@@ -518,7 +555,8 @@ impl pallet_democracy::Trait for Runtime { + type Scheduler = Scheduler; + type PalletsOrigin = OriginCaller; + type MaxVotes = MaxVotes; ++ type MaxProposals = MaxProposals; + } +``` + +---- + +### Primitives + +The shared primitives define the API between Client and Runtime. Usually, you don't have to touch nor directly interact with them, unless you created your own client or frame-less runtime. Therefore we'd expect you to understand whether you are effected by changes and how to update your code yourself. + +---- + +### Client + +#### CLI + +A few minor things have changed in the `cli` (compared to 2.0.1): + +1. we've [replaced the newly added `BuildSyncSpec` subcommand with an RPC API](https://github.com/paritytech/substrate/commit/65cc9af9b8df8d36928f6144ee7474cefbd70454#diff-c57da6fbeff8c46ce15f55ea42fedaa5a4684d79578006ce4af01ae04fd6b8f8) in an on-going effort to make light-client-support smoother, see below +2. we've [removed double accounts from our chainspec-builder](https://github.com/paritytech/substrate/commit/31499cd29ed30df932fb71b7459796f7160d0272) +3. we [don't fallback to `--chain flaming-fir` anymore](https://github.com/paritytech/substrate/commit/13cdf1c8cd2ee62d411f82b64dc7eba860c9c6c6), if no chain is given our substrate-node will error. +4. [the `subkey`-integration has seen a fix to the `insert`-command](https://github.com/paritytech/substrate/commit/54bde60cfd2c544c54e9e8623b6b8725b99557f8) that requires you to now add the `&cli` as a param. 
+ ```diff= + --- a/bin/node/cli/src/command.rs + +++ b/bin/node/cli/src/command.rs + @@ -92,7 +97,7 @@ pub fn run() -> Result<()> { + You can enable it with `--features runtime-benchmarks`.".into()) + } + } + - Some(Subcommand::Key(cmd)) => cmd.run(), + + Some(Subcommand::Key(cmd)) => cmd.run(&cli), + Some(Subcommand::Sign(cmd)) => cmd.run(), + Some(Subcommand::Verify(cmd)) => cmd.run(), + Some(Subcommand::Vanity(cmd)) => cmd.run(), + ``` + + +#### Service Builder Upgrades + +##### Light client support + +As said, we've added a new optional RPC service for improved light client support. For that to work, we need to pass the `chain_spec` and give access to the `AuxStore` to our `rpc`: + + +```diff= + +--- a/bin/node/rpc/src/lib.rs ++++ b/bin/node/rpc/src/lib.rs +@@ -49,6 +49,7 @@ use sp_consensus::SelectChain; + use sp_consensus_babe::BabeApi; + use sc_rpc::SubscriptionTaskExecutor; + use sp_transaction_pool::TransactionPool; ++use sc_client_api::AuxStore; + + /// Light client extra dependencies. + pub struct LightDeps { +@@ -94,6 +95,8 @@ pub struct FullDeps { + pub pool: Arc

, + /// The SelectChain Strategy + pub select_chain: SC, ++ /// A copy of the chain spec. ++ pub chain_spec: Box, + /// Whether to deny unsafe calls + pub deny_unsafe: DenyUnsafe, + /// BABE specific dependencies. +@@ -109,9 +112,8 @@ pub type IoHandler = jsonrpc_core::IoHandler; + pub fn create_full( + deps: FullDeps, + ) -> jsonrpc_core::IoHandler where +- C: ProvideRuntimeApi, +- C: HeaderBackend + HeaderMetadata + 'static, +- C: Send + Sync + 'static, ++ C: ProvideRuntimeApi + HeaderBackend + AuxStore + ++ HeaderMetadata + Sync + Send + 'static, + C::Api: substrate_frame_rpc_system::AccountNonceApi, + C::Api: pallet_contracts_rpc::ContractsRuntimeApi, + C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, +@@ -131,6 +133,7 @@ pub fn create_full( + client, + pool, + select_chain, ++ chain_spec, + deny_unsafe, + babe, + grandpa, +@@ -164,8 +167,8 @@ pub fn create_full( + io.extend_with( + sc_consensus_babe_rpc::BabeApi::to_delegate( + BabeRpcHandler::new( +- client, +- shared_epoch_changes, ++ client.clone(), ++ shared_epoch_changes.clone(), + keystore, + babe_config, + select_chain, +@@ -176,7 +179,7 @@ pub fn create_full( + io.extend_with( + sc_finality_grandpa_rpc::GrandpaApi::to_delegate( + GrandpaRpcHandler::new( +- shared_authority_set, ++ shared_authority_set.clone(), + shared_voter_state, + justification_stream, + subscription_executor, + +``` + +and add the new service: +
+```diff= +--- a/bin/node/rpc/src/lib.rs ++++ b/bin/node/rpc/src/lib.rs +@@ -185,6 +188,18 @@ pub fn create_full( + ) + ); + ++ io.extend_with( ++ sc_sync_state_rpc::SyncStateRpcApi::to_delegate( ++ sc_sync_state_rpc::SyncStateRpcHandler::new( ++ chain_spec, ++ client, ++ shared_authority_set, ++ shared_epoch_changes, ++ deny_unsafe, ++ ) ++ ) ++ ); ++ + io + } +``` +
+##### Telemetry +
+The telemetry subsystem has seen a few fixes and refactorings to allow for more flexible handling, in particular with regard to parachains. Most notably, `sc_service::spawn_tasks` now returns the `telemetry_connection_notifier` as the second member of the tuple (`let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(`), which should now be passed to `telemetry_on_connect` of `new_full_base`: `telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()),` (see the service-section below for a full diff). +
+On the browser side, this complicates setup a tiny bit, but not terribly. Instead of `init_console_log`, we now use `init_logging_and_telemetry` and need to make sure we spawn the runner for its handle at the end (the other changes are formatting and cosmetics): +
+```diff +--- a/bin/node/cli/src/browser.rs ++++ b/bin/node/cli/src/browser.rs +@@ -21,9 +21,8 @@ use log::info; + use wasm_bindgen::prelude::*; + use browser_utils::{ + Client, +- browser_configuration, set_console_error_panic_hook, init_console_log, ++ browser_configuration, init_logging_and_telemetry, set_console_error_panic_hook, + }; +-use std::str::FromStr; + + /// Starts the client.
+ #[wasm_bindgen] +@@ -33,29 +32,38 @@ pub async fn start_client(chain_spec: Option, log_level: String) -> Resu + .map_err(|err| JsValue::from_str(&err.to_string())) + } + +-async fn start_inner(chain_spec: Option, log_level: String) -> Result> { ++async fn start_inner( ++ chain_spec: Option, ++ log_directives: String, ++) -> Result> { + set_console_error_panic_hook(); +- init_console_log(log::Level::from_str(&log_level)?)?; ++ let telemetry_worker = init_logging_and_telemetry(&log_directives)?; + let chain_spec = match chain_spec { + Some(chain_spec) => ChainSpec::from_json_bytes(chain_spec.as_bytes().to_vec()) + .map_err(|e| format!("{:?}", e))?, + None => crate::chain_spec::development_config(), + }; + +- let config = browser_configuration(chain_spec).await?; ++ let telemetry_handle = telemetry_worker.handle(); ++ let config = browser_configuration( ++ chain_spec, ++ Some(telemetry_handle), ++ ).await?; + + info!("Substrate browser node"); + info!("✌️ version {}", config.impl_version); +- info!("❤️ by Parity Technologies, 2017-2020"); ++ info!("❤️ by Parity Technologies, 2017-2021"); + info!("📋 Chain specification: {}", config.chain_spec.name()); +- info!("🏷 Node name: {}", config.network.node_name); ++ info!("🏷 Node name: {}", config.network.node_name); + info!("👤 Role: {:?}", config.role); + + // Create the service. This is the most heavy initialization step. + let (task_manager, rpc_handlers) = + crate::service::new_light_base(config) +- .map(|(components, rpc_handlers, _, _, _)| (components, rpc_handlers)) ++ .map(|(components, rpc_handlers, _, _, _, _)| (components, rpc_handlers)) + .map_err(|e| format!("{:?}", e))?; + + ++ task_manager.spawn_handle().spawn("telemetry", telemetry_worker.run()); ++ + Ok(browser_utils::start_client(task_manager, rpc_handlers)) + } + ``` +
+##### Async & Remote Keystore support +
+In order to allow for remote keystores, the keystore subsystem has been reworked to support async operations and generally refactored to no longer hand out the keys themselves but only to sign on request. This allows a remote keystore to never hand out keys, and thus lets any substrate-based node operate without ever holding the private keys in local system memory. +
+There are some operations, however, for which the keystore must be local for performance reasons and for which a remote keystore won't work (in particular around parachains). As such, the keystore always has a local instance alongside an optional slot for a remote one: some operations bind hard to the local variant, while most subsystems just ask the generic keystore, which prefers the remote signer if one is given. To reflect this change, `sc_service::new_full_parts` now returns a `KeystoreContainer` rather than the keystore, and the other subsystems (e.g. `sc_service::PartialComponents`) expect to be given that.
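+
+A minimal sketch of how this is typically consumed (assuming the usual `Block`, `RuntimeApi` and `Executor` aliases from `node/cli/src/service.rs`, and that this runs inside your service setup where `config` and the `?` operator are available):
+
+```rust
+// Sketch only: `new_full_parts` now yields a `KeystoreContainer` as its third element.
+let (client, backend, keystore_container, task_manager) =
+    sc_service::new_full_parts::<Block, RuntimeApi, Executor>(&config)?;
+
+// Synchronous handle (`SyncCryptoStorePtr`): what e.g. `SpawnTasksParams` and
+// `BabeParams` expect, obtained via `sync_keystore()`.
+let sync_keystore = keystore_container.sync_keystore();
+
+// Async handle: what e.g. the authority-discovery worker takes, via `keystore()`.
+let async_keystore = keystore_container.keystore();
+```
+
+The full wiring is shown in the service diff further below.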
+ +###### on RPC: +
+The most visible changes are on the RPC side, where we switch from the previous `KeyStorePtr` to the new `SyncCryptoStorePtr`: +
+```diff + +--- a/bin/node/rpc/src/lib.rs ++++ b/bin/node/rpc/src/lib.rs +@@ -32,6 +32,7 @@ + + use std::sync::Arc; + ++use sp_keystore::SyncCryptoStorePtr; + use node_primitives::{Block, BlockNumber, AccountId, Index, Balance, Hash}; + use sc_consensus_babe::{Config, Epoch}; + use sc_consensus_babe_rpc::BabeRpcHandler; +@@ -40,7 +41,6 @@ use sc_finality_grandpa::{ + SharedVoterState, SharedAuthoritySet, FinalityProofProvider, GrandpaJustificationStream + }; + use sc_finality_grandpa_rpc::GrandpaRpcHandler; +-use sc_keystore::KeyStorePtr; + pub use sc_rpc_api::DenyUnsafe; + use sp_api::ProvideRuntimeApi; + use sp_block_builder::BlockBuilder; + pub struct LightDeps { +@@ -69,7 +70,7 @@ pub struct BabeDeps { + /// BABE pending epoch changes. + pub shared_epoch_changes: SharedEpochChanges, + /// The keystore that manages the keys of the node. +- pub keystore: KeyStorePtr, ++ pub keystore: SyncCryptoStorePtr, + } + +``` +
+##### GRANDPA +
+As already noted in the changelog, a few significant things have changed with regard to GRANDPA: the finality tracker has been replaced, an RPC command has been added and warp-sync support for faster light client startup has been implemented. All this means we have to make a few changes to our GRANDPA setup procedures in the client. +
+First and foremost, grandpa has internalised a few aspects: `new_partial` now expects only the `grandpa::SharedVoterState` as input rather than a tuple, and unpacking it again later is no longer needed either. On the other side, `grandpa::FinalityProofProvider::new_for_service` now requires `Some(shared_authority_set)` to be passed as a new third parameter. This set also becomes relevant when adding warp-sync support, which is added as an extra protocol layer to the networking as: +```diff= + ++ config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); ++ ++ #[cfg(feature = "cli")] ++ config.network.request_response_protocols.push(sc_finality_grandpa_warp_sync::request_response_config_for_chain( ++ &config, task_manager.spawn_handle(), backend.clone(), ++ )); +``` +
+As these changes pull through the entirety of `cli/src/service.rs`, we recommend looking at the final diff below for guidance. +
+##### In a nutshell +
+Altogether this accumulates to the following diff for `node/cli/src/service.rs`.
If you want these features and have modified your chain you should probably try to apply these patches: + + +```diff= +--- a/bin/node/cli/src/service.rs ++++ b/bin/node/cli/src/service.rs +@@ -22,11 +22,10 @@ + + use std::sync::Arc; + use sc_consensus_babe; +-use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; + use node_primitives::Block; + use node_runtime::RuntimeApi; + use sc_service::{ +- config::{Role, Configuration}, error::{Error as ServiceError}, ++ config::{Configuration}, error::{Error as ServiceError}, + RpcHandlers, TaskManager, + }; + use sp_inherents::InherentDataProviders; +@@ -34,8 +33,8 @@ use sc_network::{Event, NetworkService}; + use sp_runtime::traits::Block as BlockT; + use futures::prelude::*; + use sc_client_api::{ExecutorProvider, RemoteBackend}; +-use sp_core::traits::BareCryptoStorePtr; + use node_executor::Executor; ++use sc_telemetry::TelemetryConnectionNotifier; + + type FullClient = sc_service::TFullClient; + type FullBackend = sc_service::TFullBackend; +@@ -58,13 +57,10 @@ pub fn new_partial(config: &Configuration) -> Result, + sc_consensus_babe::BabeLink, + ), +- ( +- grandpa::SharedVoterState, +- Arc>, +- ), ++ grandpa::SharedVoterState, + ) + >, ServiceError> { +- let (client, backend, keystore, task_manager) = ++ let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts::(&config)?; + let client = Arc::new(client); + +@@ -94,7 +90,6 @@ pub fn new_partial(config: &Configuration) -> Result Result Result Result, + &sc_consensus_babe::BabeLink, + ) + ) -> Result { + let sc_service::PartialComponents { +- client, backend, mut task_manager, import_queue, keystore, select_chain, transaction_pool, ++ client, ++ backend, ++ mut task_manager, ++ import_queue, ++ keystore_container, ++ select_chain, ++ transaction_pool, + inherent_data_providers, + other: (rpc_extensions_builder, import_setup, rpc_setup), + } = new_partial(&config)?; + +- let (shared_voter_state, finality_proof_provider) = rpc_setup; ++ let shared_voter_state = rpc_setup; ++ ++ config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); ++ ++ #[cfg(feature = "cli")] ++ config.network.request_response_protocols.push(sc_finality_grandpa_warp_sync::request_response_config_for_chain( ++ &config, task_manager.spawn_handle(), backend.clone(), ++ )); + + let (network, network_status_sinks, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { +@@ -191,8 +209,6 @@ pub fn new_full_base( + import_queue, + on_demand: None, + block_announce_validator_builder: None, +- finality_proof_request_builder: None, +- finality_proof_provider: Some(finality_proof_provider.clone()), + })?; + + if config.offchain_worker.enabled { +@@ -203,26 +219,28 @@ pub fn new_full_base( + + let role = config.role.clone(); + let force_authoring = config.force_authoring; ++ let backoff_authoring_blocks = ++ Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default()); + let name = config.network.node_name.clone(); + let enable_grandpa = !config.disable_grandpa; + let prometheus_registry = config.prometheus_registry().cloned(); +- let telemetry_connection_sinks = sc_service::TelemetryConnectionSinks::default(); + +- sc_service::spawn_tasks(sc_service::SpawnTasksParams { +- config, +- backend: backend.clone(), +- client: client.clone(), +- keystore: keystore.clone(), +- network: network.clone(), +- rpc_extensions_builder: Box::new(rpc_extensions_builder), +- transaction_pool: transaction_pool.clone(), +- task_manager: &mut 
task_manager, +- on_demand: None, +- remote_blockchain: None, +- telemetry_connection_sinks: telemetry_connection_sinks.clone(), +- network_status_sinks: network_status_sinks.clone(), +- system_rpc_tx, +- })?; ++ let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks( ++ sc_service::SpawnTasksParams { ++ config, ++ backend: backend.clone(), ++ client: client.clone(), ++ keystore: keystore_container.sync_keystore(), ++ network: network.clone(), ++ rpc_extensions_builder: Box::new(rpc_extensions_builder), ++ transaction_pool: transaction_pool.clone(), ++ task_manager: &mut task_manager, ++ on_demand: None, ++ remote_blockchain: None, ++ network_status_sinks: network_status_sinks.clone(), ++ system_rpc_tx, ++ }, ++ )?; + + let (block_import, grandpa_link, babe_link) = import_setup; + +@@ -230,6 +248,7 @@ pub fn new_full_base( + + if let sc_service::config::Role::Authority { .. } = &role { + let proposer = sc_basic_authorship::ProposerFactory::new( ++ task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), +@@ -239,7 +258,7 @@ pub fn new_full_base( + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + + let babe_config = sc_consensus_babe::BabeParams { +- keystore: keystore.clone(), ++ keystore: keystore_container.sync_keystore(), + client: client.clone(), + select_chain, + env: proposer, +@@ -247,6 +266,7 @@ pub fn new_full_base( + sync_oracle: network.clone(), + inherent_data_providers: inherent_data_providers.clone(), + force_authoring, ++ backoff_authoring_blocks, + babe_link, + can_author_with, + }; +@@ -256,42 +276,30 @@ pub fn new_full_base( + } + + // Spawn authority discovery module. +- if matches!(role, Role::Authority{..} | Role::Sentry {..}) { +- let (sentries, authority_discovery_role) = match role { +- sc_service::config::Role::Authority { ref sentry_nodes } => ( +- sentry_nodes.clone(), +- sc_authority_discovery::Role::Authority ( +- keystore.clone(), +- ), +- ), +- sc_service::config::Role::Sentry {..} => ( +- vec![], +- sc_authority_discovery::Role::Sentry, +- ), +- _ => unreachable!("Due to outer matches! constraint; qed.") +- }; +- ++ if role.is_authority() { ++ let authority_discovery_role = sc_authority_discovery::Role::PublishAndDiscover( ++ keystore_container.keystore(), ++ ); + let dht_event_stream = network.event_stream("authority-discovery") + .filter_map(|e| async move { match e { + Event::Dht(e) => Some(e), + _ => None, +- }}).boxed(); ++ }}); + let (authority_discovery_worker, _service) = sc_authority_discovery::new_worker_and_service( + client.clone(), + network.clone(), +- sentries, +- dht_event_stream, ++ Box::pin(dht_event_stream), + authority_discovery_role, + prometheus_registry.clone(), + ); + +- task_manager.spawn_handle().spawn("authority-discovery-worker", authority_discovery_worker); ++ task_manager.spawn_handle().spawn("authority-discovery-worker", authority_discovery_worker.run()); + } + + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. 
+ let keystore = if role.is_authority() { +- Some(keystore as BareCryptoStorePtr) ++ Some(keystore_container.sync_keystore()) + } else { + None + }; +@@ -317,8 +325,7 @@ pub fn new_full_base( + config, + link: grandpa_link, + network: network.clone(), +- inherent_data_providers: inherent_data_providers.clone(), +- telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()), ++ telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()), + voting_rule: grandpa::VotingRulesBuilder::default().build(), + prometheus_registry, + shared_voter_state, +@@ -330,17 +337,15 @@ pub fn new_full_base( + "grandpa-voter", + grandpa::run_grandpa_voter(grandpa_config)? + ); +- } else { +- grandpa::setup_disabled_grandpa( +- client.clone(), +- &inherent_data_providers, +- network.clone(), +- )?; + } + + network_starter.start_network(); + Ok(NewFullBase { +- task_manager, inherent_data_providers, client, network, network_status_sinks, ++ task_manager, ++ inherent_data_providers, ++ client, ++ network, ++ network_status_sinks, + transaction_pool, + }) + } +@@ -353,14 +358,16 @@ pub fn new_full(config: Configuration) + }) + } + +-pub fn new_light_base(config: Configuration) -> Result<( +- TaskManager, RpcHandlers, Arc, ++pub fn new_light_base(mut config: Configuration) -> Result<( ++ TaskManager, RpcHandlers, Option, Arc, + Arc::Hash>>, + Arc>> + ), ServiceError> { +- let (client, backend, keystore, mut task_manager, on_demand) = ++ let (client, backend, keystore_container, mut task_manager, on_demand) = + sc_service::new_light_parts::(&config)?; + ++ config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); ++ + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( +@@ -371,14 +378,12 @@ pub fn new_light_base(config: Configuration) -> Result<( + on_demand.clone(), + )); + +- let grandpa_block_import = grandpa::light_block_import( +- client.clone(), backend.clone(), &(client.clone() as Arc<_>), +- Arc::new(on_demand.checker().clone()), ++ let (grandpa_block_import, _) = grandpa::block_import( ++ client.clone(), ++ &(client.clone() as Arc<_>), ++ select_chain.clone(), + )?; +- +- let finality_proof_import = grandpa_block_import.clone(); +- let finality_proof_request_builder = +- finality_proof_import.create_finality_proof_request_builder(); ++ let justification_import = grandpa_block_import.clone(); + + let (babe_block_import, babe_link) = sc_consensus_babe::block_import( + sc_consensus_babe::Config::get_or_compute(&*client)?, +@@ -391,8 +396,7 @@ pub fn new_light_base(config: Configuration) -> Result<( + let import_queue = sc_consensus_babe::import_queue( + babe_link, + babe_block_import, +- None, +- Some(Box::new(finality_proof_import)), ++ Some(Box::new(justification_import)), + client.clone(), + select_chain.clone(), + inherent_data_providers.clone(), +@@ -401,9 +405,6 @@ pub fn new_light_base(config: Configuration) -> Result<( + sp_consensus::NeverCanAuthor, + )?; + +- let finality_proof_provider = +- GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); +- + let (network, network_status_sinks, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, +@@ -413,8 +414,6 @@ pub fn new_light_base(config: Configuration) -> Result<( + import_queue, + on_demand: Some(on_demand.clone()), + block_announce_validator_builder: None, +- finality_proof_request_builder: 
Some(finality_proof_request_builder), +- finality_proof_provider: Some(finality_proof_provider), + })?; + network_starter.start_network(); + +@@ -433,32 +432,39 @@ pub fn new_light_base(config: Configuration) -> Result<( + + let rpc_extensions = node_rpc::create_light(light_deps); + +- let rpc_handlers = ++ let (rpc_handlers, telemetry_connection_notifier) = + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + on_demand: Some(on_demand), + remote_blockchain: Some(backend.remote_blockchain()), + rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), + client: client.clone(), + transaction_pool: transaction_pool.clone(), +- config, keystore, backend, network_status_sinks, system_rpc_tx, ++ keystore: keystore_container.sync_keystore(), ++ config, backend, network_status_sinks, system_rpc_tx, + network: network.clone(), +- telemetry_connection_sinks: sc_service::TelemetryConnectionSinks::default(), + task_manager: &mut task_manager, + })?; + +- Ok((task_manager, rpc_handlers, client, network, transaction_pool)) ++ Ok(( ++ task_manager, ++ rpc_handlers, ++ telemetry_connection_notifier, ++ client, ++ network, ++ transaction_pool, ++ )) + } + + /// Builds a new service for a light client. + pub fn new_light(config: Configuration) -> Result { +- new_light_base(config).map(|(task_manager, _, _, _, _)| { ++ new_light_base(config).map(|(task_manager, _, _, _, _, _)| { + task_manager + }) + } + + #[cfg(test)] + mod tests { +- use std::{sync::Arc, borrow::Cow, any::Any}; ++ use std::{sync::Arc, borrow::Cow, any::Any, convert::TryInto}; + use sc_consensus_babe::{CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY}; + use sc_consensus_epochs::descendent_query; + use sp_consensus::{ +@@ -469,20 +475,25 @@ mod tests { + use node_runtime::{BalancesCall, Call, UncheckedExtrinsic, Address}; + use node_runtime::constants::{currency::CENTS, time::SLOT_DURATION}; + use codec::Encode; +- use sp_core::{crypto::Pair as CryptoPair, H256}; ++ use sp_core::{ ++ crypto::Pair as CryptoPair, ++ H256, ++ Public ++ }; ++ use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; + use sp_runtime::{ + generic::{BlockId, Era, Digest, SignedPayload}, + traits::{Block as BlockT, Header as HeaderT}, + traits::Verify, + }; + use sp_timestamp; +- use sp_finality_tracker; + use sp_keyring::AccountKeyring; + use sc_service_test::TestNetNode; + use crate::service::{new_full_base, new_light_base, NewFullBase}; +- use sp_runtime::traits::IdentifyAccount; ++ use sp_runtime::{key_types::BABE, traits::IdentifyAccount, RuntimeAppPublic}; + use sp_transaction_pool::{MaintainedTransactionPool, ChainEvent}; + use sc_client_api::BlockBackend; ++ use sc_keystore::LocalKeystore; + + type AccountPublic = ::Signer; + +@@ -492,15 +503,15 @@ mod tests { + #[ignore] + fn test_sync() { + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); +- let keystore = sc_keystore::Store::open(keystore_path.path(), None) +- .expect("Creates keystore"); +- let alice = keystore.write().insert_ephemeral_from_seed::("//Alice") +- .expect("Creates authority pair"); ++ let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) ++ .expect("Creates keystore")); ++ let alice: sp_consensus_babe::AuthorityId = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) ++ .expect("Creates authority pair").into(); + + let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority(); + + // For the block factory +- let mut slot_num = 1u64; ++ 
let mut slot = 1u64; + + // For the extrinsics factory + let bob = Arc::new(AccountKeyring::Bob.pair()); +@@ -528,14 +539,13 @@ mod tests { + Ok((node, (inherent_data_providers, setup_handles.unwrap()))) + }, + |config| { +- let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; ++ let (keep_alive, _, _, client, network, transaction_pool) = new_light_base(config)?; + Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + }, + |service, &mut (ref inherent_data_providers, (ref mut block_import, ref babe_link))| { + let mut inherent_data = inherent_data_providers + .create_inherent_data() + .expect("Creates inherent data."); +- inherent_data.replace_data(sp_finality_tracker::INHERENT_IDENTIFIER, &1u64); + + let parent_id = BlockId::number(service.client().chain_info().best_number); + let parent_header = service.client().header(&parent_id).unwrap().unwrap(); +@@ -552,6 +562,7 @@ mod tests { + ); + + let mut proposer_factory = sc_basic_authorship::ProposerFactory::new( ++ service.spawn_handle(), + service.client(), + service.transaction_pool(), + None, +@@ -561,7 +572,7 @@ mod tests { + descendent_query(&*service.client()), + &parent_hash, + parent_number, +- slot_num, ++ slot.into(), + ).unwrap().unwrap(); + + let mut digest = Digest::::default(); +@@ -569,18 +580,18 @@ mod tests { + // even though there's only one authority some slots might be empty, + // so we must keep trying the next slots until we can claim one. + let babe_pre_digest = loop { +- inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); ++ inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot * SLOT_DURATION)); + if let Some(babe_pre_digest) = sc_consensus_babe::test_helpers::claim_slot( +- slot_num, ++ slot.into(), + &parent_header, + &*service.client(), +- &keystore, ++ keystore.clone(), + &babe_link, + ) { + break babe_pre_digest; + } + +- slot_num += 1; ++ slot += 1; + }; + + digest.push(::babe_pre_digest(babe_pre_digest)); +@@ -600,11 +611,18 @@ mod tests { + // sign the pre-sealed hash of the block and then + // add it to a digest item. 
+ let to_sign = pre_hash.encode(); +- let signature = alice.sign(&to_sign[..]); ++ let signature = SyncCryptoStore::sign_with( ++ &*keystore, ++ sp_consensus_babe::AuthorityId::ID, ++ &alice.to_public_crypto_pair(), ++ &to_sign, ++ ).unwrap() ++ .try_into() ++ .unwrap(); + let item = ::babe_seal( +- signature.into(), ++ signature, + ); +- slot_num += 1; ++ slot += 1; + + let mut params = BlockImportParams::new(BlockOrigin::File, new_header); + params.post_digests.push(item); +@@ -679,7 +697,7 @@ mod tests { + Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool)) + }, + |config| { +- let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; ++ let (keep_alive, _, _, client, network, transaction_pool) = new_light_base(config)?; + Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + }, + vec![ +``` diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index d1742e567cfac..67fa0af3d63be 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-assets" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,25 +14,35 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } # Needed for various traits. In our case, `OnFinalize`. -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } # Needed for type-safe access to storage DB. -frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. 
-frame-system = { version = "2.0.0", default-features = false, path = "../system" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-std = { version = "2.0.0", path = "../../primitives/std" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-std = { version = "3.0.0", path = "../../primitives/std" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ "serde", "codec/std", + "sp-std/std", "sp-runtime/std", "frame-support/std", "frame-system/std", + "frame-benchmarking/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "sp-runtime/runtime-benchmarks", + "frame-system/runtime-benchmarks", ] diff --git a/frame/assets/README.md b/frame/assets/README.md index 6b3fe21e52775..44c4eedc31be7 100644 --- a/frame/assets/README.md +++ b/frame/assets/README.md @@ -11,9 +11,9 @@ with a fixed supply, including: * Asset Transfer * Asset Destruction -To use it in your runtime, you need to implement the assets [`Trait`](https://docs.rs/pallet-assets/latest/pallet_assets/trait.Trait.html). +To use it in your runtime, you need to implement the assets [`assets::Trait`](https://docs.rs/pallet-assets/latest/pallet_assets/trait.Trait.html). -The supported dispatchable functions are documented in the [`Call`](https://docs.rs/pallet-assets/latest/pallet_assets/enum.Call.html) enum. +The supported dispatchable functions are documented in the [`assets::Call`](https://docs.rs/pallet-assets/latest/pallet_assets/enum.Call.html) enum. ### Terminology @@ -72,10 +72,10 @@ use pallet_assets as assets; use frame_support::{decl_module, dispatch, ensure}; use frame_system::ensure_signed; -pub trait Trait: assets::Trait { } +pub trait Config: assets::Config { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { pub fn issue_token_airdrop(origin) -> dispatch::DispatchResult { let sender = ensure_signed(origin).map_err(|e| e.as_str())?; @@ -106,11 +106,11 @@ Below are assumptions that must be held when using this module. If any of them are violated, the behavior of this module is undefined. * The total count of assets should be less than - `Trait::AssetId::max_value()`. + `Config::AssetId::max_value()`. ## Related Modules * [`System`](https://docs.rs/frame-system/latest/frame_system/) * [`Support`](https://docs.rs/frame-support/latest/frame_support/) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs new file mode 100644 index 0000000000000..986eedfb6a861 --- /dev/null +++ b/frame/assets/src/benchmarking.rs @@ -0,0 +1,352 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Assets pallet benchmarking. + +use super::*; +use sp_runtime::traits::Bounded; +use frame_system::RawOrigin as SystemOrigin; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_support::traits::Get; + +use crate::Module as Assets; + +const SEED: u32 = 0; + +fn create_default_asset(max_zombies: u32) + -> (T::AccountId, ::Source) +{ + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let root = SystemOrigin::Root.into(); + assert!(Assets::::force_create( + root, + Default::default(), + caller_lookup.clone(), + max_zombies, + 1u32.into(), + ).is_ok()); + (caller, caller_lookup) +} + +fn create_default_minted_asset(max_zombies: u32, amount: T::Balance) + -> (T::AccountId, ::Source) +{ + let (caller, caller_lookup) = create_default_asset::(max_zombies); + assert!(Assets::::mint( + SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + caller_lookup.clone(), + amount, + ).is_ok()); + (caller, caller_lookup) +} + +fn add_zombies(minter: T::AccountId, n: u32) { + let origin = SystemOrigin::Signed(minter); + for i in 0..n { + let target = account("zombie", i, SEED); + let target_lookup = T::Lookup::unlookup(target); + assert!(Assets::::mint(origin.clone().into(), Default::default(), target_lookup, 100u32.into()).is_ok()); + } +} + +fn assert_last_event(generic_event: ::Event) { + let events = frame_system::Module::::events(); + let system_event: ::Event = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +benchmarks! { + create { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, 1, 1u32.into()) + verify { + assert_last_event::(Event::Created(Default::default(), caller.clone(), caller).into()); + } + + force_create { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + }: _(SystemOrigin::Root, Default::default(), caller_lookup, 1, 1u32.into()) + verify { + assert_last_event::(Event::ForceCreated(Default::default(), caller).into()); + } + + destroy { + let z in 0 .. 10_000; + let (caller, _) = create_default_asset::(10_000); + add_zombies::(caller.clone(), z); + }: _(SystemOrigin::Signed(caller), Default::default(), 10_000) + verify { + assert_last_event::(Event::Destroyed(Default::default()).into()); + } + + force_destroy { + let z in 0 .. 
10_000; + let (caller, _) = create_default_asset::(10_000); + add_zombies::(caller.clone(), z); + }: _(SystemOrigin::Root, Default::default(), 10_000) + verify { + assert_last_event::(Event::Destroyed(Default::default()).into()); + } + + mint { + let (caller, caller_lookup) = create_default_asset::(10); + let amount = T::Balance::from(100u32); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, amount) + verify { + assert_last_event::(Event::Issued(Default::default(), caller, amount).into()); + } + + burn { + let amount = T::Balance::from(100u32); + let (caller, caller_lookup) = create_default_minted_asset::(10, amount); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, amount) + verify { + assert_last_event::(Event::Burned(Default::default(), caller, amount).into()); + } + + transfer { + let amount = T::Balance::from(100u32); + let (caller, caller_lookup) = create_default_minted_asset::(10, amount); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), target_lookup, amount) + verify { + assert_last_event::(Event::Transferred(Default::default(), caller, target, amount).into()); + } + + force_transfer { + let amount = T::Balance::from(100u32); + let (caller, caller_lookup) = create_default_minted_asset::(10, amount); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, target_lookup, amount) + verify { + assert_last_event::( + Event::ForceTransferred(Default::default(), caller, target, amount).into() + ); + } + + freeze { + let (caller, caller_lookup) = create_default_minted_asset::(10, 100u32.into()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) + verify { + assert_last_event::(Event::Frozen(Default::default(), caller).into()); + } + + thaw { + let (caller, caller_lookup) = create_default_minted_asset::(10, 100u32.into()); + Assets::::freeze( + SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + caller_lookup.clone(), + )?; + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) + verify { + assert_last_event::(Event::Thawed(Default::default(), caller).into()); + } + + freeze_asset { + let (caller, caller_lookup) = create_default_minted_asset::(10, 100u32.into()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default()) + verify { + assert_last_event::(Event::AssetFrozen(Default::default()).into()); + } + + thaw_asset { + let (caller, caller_lookup) = create_default_minted_asset::(10, 100u32.into()); + Assets::::freeze_asset( + SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + )?; + }: _(SystemOrigin::Signed(caller.clone()), Default::default()) + verify { + assert_last_event::(Event::AssetThawed(Default::default()).into()); + } + + transfer_ownership { + let (caller, _) = create_default_asset::(10); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller), Default::default(), target_lookup) + verify { + assert_last_event::(Event::OwnerChanged(Default::default(), target).into()); + } + + set_team { + let (caller, _) = create_default_asset::(10); + let target0 = T::Lookup::unlookup(account("target", 0, SEED)); + let target1 = T::Lookup::unlookup(account("target", 1, SEED)); 
+ let target2 = T::Lookup::unlookup(account("target", 2, SEED)); + }: _(SystemOrigin::Signed(caller), Default::default(), target0.clone(), target1.clone(), target2.clone()) + verify { + assert_last_event::(Event::TeamChanged( + Default::default(), + account("target", 0, SEED), + account("target", 1, SEED), + account("target", 2, SEED), + ).into()); + } + + set_max_zombies { + let (caller, _) = create_default_asset::(10); + let max_zombies: u32 = 100; + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + }: _(SystemOrigin::Signed(caller), Default::default(), max_zombies) + verify { + assert_last_event::(Event::MaxZombiesChanged(Default::default(), max_zombies).into()); + } + + set_metadata { + let n in 0 .. T::StringLimit::get(); + let s in 0 .. T::StringLimit::get(); + + let name = vec![0u8; n as usize]; + let symbol = vec![0u8; s as usize]; + let decimals = 12; + + let (caller, _) = create_default_asset::(10); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + }: _(SystemOrigin::Signed(caller), Default::default(), name.clone(), symbol.clone(), decimals) + verify { + assert_last_event::(Event::MetadataSet(Default::default(), name, symbol, decimals).into()); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + + #[test] + fn create() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_create::().is_ok()); + }); + } + + #[test] + fn force_create() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_force_create::().is_ok()); + }); + } + + #[test] + fn destroy() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_destroy::().is_ok()); + }); + } + + #[test] + fn force_destroy() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_force_destroy::().is_ok()); + }); + } + + #[test] + fn mint() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_mint::().is_ok()); + }); + } + + #[test] + fn burn() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_burn::().is_ok()); + }); + } + + #[test] + fn transfer() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_transfer::().is_ok()); + }); + } + + #[test] + fn force_transfer() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_force_transfer::().is_ok()); + }); + } + + #[test] + fn freeze() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_freeze::().is_ok()); + }); + } + + #[test] + fn thaw() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_thaw::().is_ok()); + }); + } + + #[test] + fn freeze_asset() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_freeze_asset::().is_ok()); + }); + } + + #[test] + fn thaw_asset() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_thaw_asset::().is_ok()); + }); + } + + #[test] + fn transfer_ownership() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_transfer_ownership::().is_ok()); + }); + } + + #[test] + fn set_team() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_set_team::().is_ok()); + }); + } + + #[test] + fn set_max_zombies() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_set_max_zombies::().is_ok()); + }); + } + + #[test] + fn set_metadata() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_set_metadata::().is_ok()); + }); + } +} diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index e5ad2ae352eb8..7b04ea11bafed 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,41 +24,72 @@ //! The Assets module provides functionality for asset management of fungible asset classes //! with a fixed supply, including: //! -//! * Asset Issuance -//! * Asset Transfer -//! * Asset Destruction +//! * Asset Issuance (Minting) +//! * Asset Transferal +//! * Asset Freezing +//! * Asset Destruction (Burning) //! -//! To use it in your runtime, you need to implement the assets [`Trait`](./trait.Trait.html). +//! To use it in your runtime, you need to implement the assets [`Config`]. //! -//! The supported dispatchable functions are documented in the [`Call`](./enum.Call.html) enum. +//! The supported dispatchable functions are documented in the [`Call`] enum. //! //! ### Terminology //! -//! * **Asset issuance:** The creation of a new asset, whose total supply will belong to the -//! account that issues the asset. -//! * **Asset transfer:** The action of transferring assets from one account to another. -//! * **Asset destruction:** The process of an account removing its entire holding of an asset. -//! * **Fungible asset:** An asset whose units are interchangeable. -//! * **Non-fungible asset:** An asset for which each unit has unique characteristics. +//! * **Admin**: An account ID uniquely privileged to be able to unfreeze (thaw) an account and it's +//! assets, as well as forcibly transfer a particular class of assets between arbitrary accounts +//! and reduce the balance of a particular class of assets of arbitrary accounts. +//! * **Asset issuance/minting**: The creation of a new asset, whose total supply will belong to the +//! account that issues the asset. This is a privileged operation. +//! * **Asset transfer**: The reduction of the balance of an asset of one account with the +//! corresponding increase in the balance of another. +//! * **Asset destruction**: The process of reduce the balance of an asset of one account. This is +//! a privileged operation. +//! * **Fungible asset**: An asset whose units are interchangeable. +//! * **Issuer**: An account ID uniquely privileged to be able to mint a particular class of assets. +//! * **Freezer**: An account ID uniquely privileged to be able to freeze an account from +//! transferring a particular class of assets. +//! * **Freezing**: Removing the possibility of an unpermissioned transfer of an asset from a +//! particular account. +//! * **Non-fungible asset**: An asset for which each unit has unique characteristics. +//! * **Owner**: An account ID uniquely privileged to be able to destroy a particular asset class, +//! or to set the Issuer, Freezer or Admin of that asset class. +//! * **Zombie**: An account which has a balance of some assets in this pallet, but no other +//! footprint on-chain, in particular no account managed in the `frame_system` pallet. //! //! ### Goals //! //! The assets system in Substrate is designed to make the following possible: //! -//! * Issue a unique asset to its creator's account. +//! * Issue a new assets in a permissioned or permissionless way, if permissionless, then with a +//! deposit required. +//! * Allow accounts to hold these assets without otherwise existing on-chain (*zombies*). //! * Move assets between accounts. -//! * Remove an account's balance of an asset when requested by that account's owner and update -//! the asset's total supply. +//! 
* Update the asset's total supply. +//! * Allow administrative activities by specially privileged accounts including freezing account +//! balances and minting/burning assets. //! //! ## Interface //! -//! ### Dispatchable Functions +//! ### Permissionless Functions //! -//! * `issue` - Issues the total supply of a new fungible asset to the account of the caller of the function. -//! * `transfer` - Transfers an `amount` of units of fungible asset `id` from the balance of -//! the function caller's account (`origin`) to a `target` account. -//! * `destroy` - Destroys the entire holding of a fungible asset `id` associated with the account -//! that called the function. +//! * `create`: Creates a new asset class, taking the required deposit. +//! * `transfer`: Transfer sender's assets to another account. +//! +//! ### Permissioned Functions +//! +//! * `force_create`: Creates a new asset class without taking any deposit. +//! * `force_destroy`: Destroys an asset class. +//! +//! ### Privileged Functions +//! * `destroy`: Destroys an entire asset class; called by the asset class's Owner. +//! * `mint`: Increases the asset balance of an account; called by the asset class's Issuer. +//! * `burn`: Decreases the asset balance of an account; called by the asset class's Admin. +//! * `force_transfer`: Transfers between arbitrary accounts; called by the asset class's Admin. +//! * `freeze`: Disallows further `transfer`s from an account; called by the asset class's Freezer. +//! * `thaw`: Allows further `transfer`s from an account; called by the asset class's Admin. +//! * `transfer_ownership`: Changes an asset class's Owner; called by the asset class's Owner. +//! * `set_team`: Changes an asset class's Admin, Freezer and Issuer; called by the asset class's +//! Owner. //! //! Please refer to the [`Call`](./enum.Call.html) enum and its associated variants for documentation on each function. //! @@ -70,61 +101,6 @@ //! //! Please refer to the [`Module`](./struct.Module.html) struct for details on publicly available functions. //! -//! ## Usage -//! -//! The following example shows how to use the Assets module in your runtime by exposing public functions to: -//! -//! * Issue a new fungible asset for a token distribution event (airdrop). -//! * Query the fungible asset holding balance of an account. -//! * Query the total supply of a fungible asset that has been issued. -//! -//! ### Prerequisites -//! -//! Import the Assets module and types and derive your runtime's configuration traits from the Assets module trait. -//! -//! ### Simple Code Snippet -//! -//! ```rust,ignore -//! use pallet_assets as assets; -//! use frame_support::{decl_module, dispatch, ensure}; -//! use frame_system::ensure_signed; -//! -//! pub trait Trait: assets::Trait { } -//! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! pub fn issue_token_airdrop(origin) -> dispatch::DispatchResult { -//! let sender = ensure_signed(origin).map_err(|e| e.as_str())?; -//! -//! const ACCOUNT_ALICE: u64 = 1; -//! const ACCOUNT_BOB: u64 = 2; -//! const COUNT_AIRDROP_RECIPIENTS: u64 = 2; -//! const TOKENS_FIXED_SUPPLY: u64 = 100; -//! -//! ensure!(!COUNT_AIRDROP_RECIPIENTS.is_zero(), "Divide by zero error."); -//! -//! let asset_id = Self::next_asset_id(); -//! -//! >::mutate(|asset_id| *asset_id += 1); -//! >::insert((asset_id, &ACCOUNT_ALICE), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); -//! >::insert((asset_id, &ACCOUNT_BOB), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); -//! 
>::insert(asset_id, TOKENS_FIXED_SUPPLY); -//! -//! Self::deposit_event(RawEvent::Issued(asset_id, sender, TOKENS_FIXED_SUPPLY)); -//! Ok(()) -//! } -//! } -//! } -//! ``` -//! -//! ## Assumptions -//! -//! Below are assumptions that must be held when using this module. If any of -//! them are violated, the behavior of this module is undefined. -//! -//! * The total count of assets should be less than -//! `Trait::AssetId::max_value()`. -//! //! ## Related Modules //! //! * [`System`](../frame_system/index.html) @@ -133,221 +109,1305 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::{Parameter, decl_module, decl_event, decl_storage, decl_error, ensure}; -use sp_runtime::traits::{Member, AtLeast32Bit, AtLeast32BitUnsigned, Zero, StaticLookup}; -use frame_system::ensure_signed; -use sp_runtime::traits::One; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +pub mod weights; -/// The module configuration trait. -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; +use sp_std::{fmt::Debug, prelude::*}; +use sp_runtime::{ + RuntimeDebug, + traits::{ + AtLeast32BitUnsigned, Zero, StaticLookup, Saturating, CheckedSub, CheckedAdd, + } +}; +use codec::{Encode, Decode, HasCompact}; +use frame_support::{ + ensure, + traits::{Currency, ReservableCurrency, BalanceStatus::Reserved}, + dispatch::DispatchError, +}; +pub use weights::WeightInfo; - /// The units in which we record balances. - type Balance: Member + Parameter + AtLeast32BitUnsigned + Default + Copy; +pub use pallet::*; - /// The arithmetic type of asset identifier. - type AssetId: Parameter + AtLeast32Bit + Default + Copy; -} +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; + +#[frame_support::pallet] +pub mod pallet { + use frame_support::{ + dispatch::DispatchResultWithPostInfo, + pallet_prelude::*, + }; + use frame_system::pallet_prelude::*; + use super::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + /// The module configuration trait. + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// The units in which we record balances. + type Balance: Member + Parameter + AtLeast32BitUnsigned + Default + Copy; + + /// The arithmetic type of asset identifier. + type AssetId: Member + Parameter + Default + Copy + HasCompact; + + /// The currency mechanism. + type Currency: ReservableCurrency; + + /// The origin which may forcibly create or destroy an asset. + type ForceOrigin: EnsureOrigin; + + /// The basic amount of funds that must be reserved when creating a new asset class. + type AssetDepositBase: Get>; + + /// The additional funds that must be reserved for every zombie account that an asset class + /// supports. + type AssetDepositPerZombie: Get>; + + /// The maximum length of a name or symbol stored on-chain. + type StringLimit: Get; + + /// The basic amount of funds that must be reserved when adding metadata to your asset. + type MetadataDepositBase: Get>; + + /// The additional funds that must be reserved for the number of bytes you store in your + /// metadata. + type MetadataDepositPerByte: Get>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + /// Issue a new class of fungible assets from a public origin. 
+ /// + /// This new asset class has no assets initially. + /// + /// The origin must be Signed and the sender must have sufficient funds free. + /// + /// Funds of sender are reserved according to the formula: + /// `AssetDepositBase + AssetDepositPerZombie * max_zombies`. + /// + /// Parameters: + /// - `id`: The identifier of the new asset. This must not be currently in use to identify + /// an existing asset. + /// - `owner`: The owner of this class of assets. The owner has full superuser permissions + /// over this asset, but may later change and configure the permissions using `transfer_ownership` + /// and `set_team`. + /// - `max_zombies`: The total number of accounts which may hold assets in this class yet + /// have no existential deposit. + /// - `min_balance`: The minimum balance of this new asset that any single account must + /// have. If an account's balance is reduced below this, then it collapses to zero. + /// + /// Emits `Created` event when successful. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::create())] + pub(super) fn create( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + admin: ::Source, + max_zombies: u32, + min_balance: T::Balance, + ) -> DispatchResultWithPostInfo { + let owner = ensure_signed(origin)?; + let admin = T::Lookup::lookup(admin)?; -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - /// Issue a new class of fungible assets. There are, and will only ever be, `total` - /// such assets and they'll all belong to the `origin` initially. It will have an - /// identifier `AssetId` instance: this will be specified in the `Issued` event. - /// - /// # - /// - `O(1)` - /// - 1 storage mutation (codec `O(1)`). - /// - 2 storage writes (condec `O(1)`). - /// - 1 event. - /// # - #[weight = 0] - fn issue(origin, #[compact] total: T::Balance) { + ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + + let deposit = T::AssetDepositPerZombie::get() + .saturating_mul(max_zombies.into()) + .saturating_add(T::AssetDepositBase::get()); + T::Currency::reserve(&owner, deposit)?; + + Asset::::insert(id, AssetDetails { + owner: owner.clone(), + issuer: admin.clone(), + admin: admin.clone(), + freezer: admin.clone(), + supply: Zero::zero(), + deposit, + max_zombies, + min_balance, + zombies: Zero::zero(), + accounts: Zero::zero(), + is_frozen: false, + }); + Self::deposit_event(Event::Created(id, owner, admin)); + Ok(().into()) + } + + /// Issue a new class of fungible assets from a privileged origin. + /// + /// This new asset class has no assets initially. + /// + /// The origin must conform to `ForceOrigin`. + /// + /// Unlike `create`, no funds are reserved. + /// + /// - `id`: The identifier of the new asset. This must not be currently in use to identify + /// an existing asset. + /// - `owner`: The owner of this class of assets. The owner has full superuser permissions + /// over this asset, but may later change and configure the permissions using `transfer_ownership` + /// and `set_team`. + /// - `max_zombies`: The total number of accounts which may hold assets in this class yet + /// have no existential deposit. + /// - `min_balance`: The minimum balance of this new asset that any single account must + /// have. If an account's balance is reduced below this, then it collapses to zero. + /// + /// Emits `ForceCreated` event when successful. 
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_create())] + pub(super) fn force_create( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + owner: ::Source, + #[pallet::compact] max_zombies: u32, + #[pallet::compact] min_balance: T::Balance, + ) -> DispatchResultWithPostInfo { + T::ForceOrigin::ensure_origin(origin)?; + let owner = T::Lookup::lookup(owner)?; + + ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + + Asset::::insert(id, AssetDetails { + owner: owner.clone(), + issuer: owner.clone(), + admin: owner.clone(), + freezer: owner.clone(), + supply: Zero::zero(), + deposit: Zero::zero(), + max_zombies, + min_balance, + zombies: Zero::zero(), + accounts: Zero::zero(), + is_frozen: false, + }); + Self::deposit_event(Event::ForceCreated(id, owner)); + Ok(().into()) + } + + /// Destroy a class of fungible assets owned by the sender. + /// + /// The origin must be Signed and the sender must be the owner of the asset `id`. + /// + /// - `id`: The identifier of the asset to be destroyed. This must identify an existing + /// asset. + /// + /// Emits `Destroyed` event when successful. + /// + /// Weight: `O(z)` where `z` is the number of zombie accounts. + #[pallet::weight(T::WeightInfo::destroy(*zombies_witness))] + pub(super) fn destroy( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + #[pallet::compact] zombies_witness: u32, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + + Asset::::try_mutate_exists(id, |maybe_details| { + let details = maybe_details.take().ok_or(Error::::Unknown)?; + ensure!(details.owner == origin, Error::::NoPermission); + ensure!(details.accounts == details.zombies, Error::::RefsLeft); + ensure!(details.zombies <= zombies_witness, Error::::BadWitness); + + let metadata = Metadata::::take(&id); + T::Currency::unreserve(&details.owner, details.deposit.saturating_add(metadata.deposit)); + + *maybe_details = None; + Account::::remove_prefix(&id); + Self::deposit_event(Event::Destroyed(id)); + Ok(().into()) + }) + } + + /// Destroy a class of fungible assets. + /// + /// The origin must conform to `ForceOrigin`. + /// + /// - `id`: The identifier of the asset to be destroyed. This must identify an existing + /// asset. + /// + /// Emits `Destroyed` event when successful. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_destroy(*zombies_witness))] + pub(super) fn force_destroy( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + #[pallet::compact] zombies_witness: u32, + ) -> DispatchResultWithPostInfo { + T::ForceOrigin::ensure_origin(origin)?; + + Asset::::try_mutate_exists(id, |maybe_details| { + let details = maybe_details.take().ok_or(Error::::Unknown)?; + ensure!(details.accounts == details.zombies, Error::::RefsLeft); + ensure!(details.zombies <= zombies_witness, Error::::BadWitness); + + let metadata = Metadata::::take(&id); + T::Currency::unreserve(&details.owner, details.deposit.saturating_add(metadata.deposit)); + + *maybe_details = None; + Account::::remove_prefix(&id); + Self::deposit_event(Event::Destroyed(id)); + Ok(().into()) + }) + } + + /// Mint assets of a particular class. + /// + /// The origin must be Signed and the sender must be the Issuer of the asset `id`. + /// + /// - `id`: The identifier of the asset to have some amount minted. + /// - `beneficiary`: The account to be credited with the minted assets. + /// - `amount`: The amount of the asset to be minted. 
+ /// + /// Emits `Destroyed` event when successful. + /// + /// Weight: `O(1)` + /// Modes: Pre-existing balance of `beneficiary`; Account pre-existence of `beneficiary`. + #[pallet::weight(T::WeightInfo::mint())] + pub(super) fn mint( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + beneficiary: ::Source, + #[pallet::compact] amount: T::Balance + ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; - let id = Self::next_asset_id(); - >::mutate(|id| *id += One::one()); + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; - >::insert((id, &origin), total); - >::insert(id, total); + ensure!(&origin == &details.issuer, Error::::NoPermission); + details.supply = details.supply.checked_add(&amount).ok_or(Error::::Overflow)?; - Self::deposit_event(RawEvent::Issued(id, origin, total)); + Account::::try_mutate(id, &beneficiary, |t| -> DispatchResultWithPostInfo { + let new_balance = t.balance.saturating_add(amount); + ensure!(new_balance >= details.min_balance, Error::::BalanceLow); + if t.balance.is_zero() { + t.is_zombie = Self::new_account(&beneficiary, details)?; + } + t.balance = new_balance; + Ok(().into()) + })?; + Self::deposit_event(Event::Issued(id, beneficiary, amount)); + Ok(().into()) + }) } - /// Move some assets from one holder to another. - /// - /// # - /// - `O(1)` - /// - 1 static lookup - /// - 2 storage mutations (codec `O(1)`). - /// - 1 event. - /// # - #[weight = 0] - fn transfer(origin, - #[compact] id: T::AssetId, + /// Reduce the balance of `who` by as much as possible up to `amount` assets of `id`. + /// + /// Origin must be Signed and the sender should be the Manager of the asset `id`. + /// + /// Bails with `BalanceZero` if the `who` is already dead. + /// + /// - `id`: The identifier of the asset to have some amount burned. + /// - `who`: The account to be debited from. + /// - `amount`: The maximum amount by which `who`'s balance should be reduced. + /// + /// Emits `Burned` with the actual amount burned. If this takes the balance to below the + /// minimum for the asset, then the amount burned is increased to take it to zero. + /// + /// Weight: `O(1)` + /// Modes: Post-existence of `who`; Pre & post Zombie-status of `who`. + #[pallet::weight(T::WeightInfo::burn())] + pub(super) fn burn( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + who: ::Source, + #[pallet::compact] amount: T::Balance + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let who = T::Lookup::lookup(who)?; + + Asset::::try_mutate(id, |maybe_details| { + let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &d.admin, Error::::NoPermission); + + let burned = Account::::try_mutate_exists( + id, + &who, + |maybe_account| -> Result { + let mut account = maybe_account.take().ok_or(Error::::BalanceZero)?; + let mut burned = amount.min(account.balance); + account.balance -= burned; + *maybe_account = if account.balance < d.min_balance { + burned += account.balance; + Self::dead_account(&who, d, account.is_zombie); + None + } else { + Some(account) + }; + Ok(burned) + } + )?; + + d.supply = d.supply.saturating_sub(burned); + + Self::deposit_event(Event::Burned(id, who, burned)); + Ok(().into()) + }) + } + + /// Move some assets from the sender account to another. + /// + /// Origin must be Signed. + /// + /// - `id`: The identifier of the asset to have some amount transferred. 
+ /// - `target`: The account to be credited. + /// - `amount`: The amount by which the sender's balance of assets should be reduced and + /// `target`'s balance increased. The amount actually transferred may be slightly greater in + /// the case that the transfer would otherwise take the sender balance above zero but below + /// the minimum balance. Must be greater than zero. + /// + /// Emits `Transferred` with the actual amount transferred. If this takes the source balance + /// to below the minimum for the asset, then the amount transferred is increased to take it + /// to zero. + /// + /// Weight: `O(1)` + /// Modes: Pre-existence of `target`; Post-existence of sender; Prior & post zombie-status + /// of sender; Account pre-existence of `target`. + #[pallet::weight(T::WeightInfo::transfer())] + pub(super) fn transfer( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, target: ::Source, - #[compact] amount: T::Balance - ) { + #[pallet::compact] amount: T::Balance + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + ensure!(!amount.is_zero(), Error::::AmountZero); + + let mut origin_account = Account::::get(id, &origin); + ensure!(!origin_account.is_frozen, Error::::Frozen); + origin_account.balance = origin_account.balance.checked_sub(&amount) + .ok_or(Error::::BalanceLow)?; + + let dest = T::Lookup::lookup(target)?; + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(!details.is_frozen, Error::::Frozen); + + if dest == origin { + return Ok(().into()) + } + + let mut amount = amount; + if origin_account.balance < details.min_balance { + amount += origin_account.balance; + origin_account.balance = Zero::zero(); + } + + Account::::try_mutate(id, &dest, |a| -> DispatchResultWithPostInfo { + let new_balance = a.balance.saturating_add(amount); + ensure!(new_balance >= details.min_balance, Error::::BalanceLow); + if a.balance.is_zero() { + a.is_zombie = Self::new_account(&dest, details)?; + } + a.balance = new_balance; + Ok(().into()) + })?; + + match origin_account.balance.is_zero() { + false => { + Self::dezombify(&origin, details, &mut origin_account.is_zombie); + Account::::insert(id, &origin, &origin_account) + } + true => { + Self::dead_account(&origin, details, origin_account.is_zombie); + Account::::remove(id, &origin); + } + } + + Self::deposit_event(Event::Transferred(id, origin, dest, amount)); + Ok(().into()) + }) + } + + /// Move some assets from one account to another. + /// + /// Origin must be Signed and the sender should be the Admin of the asset `id`. + /// + /// - `id`: The identifier of the asset to have some amount transferred. + /// - `source`: The account to be debited. + /// - `dest`: The account to be credited. + /// - `amount`: The amount by which the `source`'s balance of assets should be reduced and + /// `dest`'s balance increased. The amount actually transferred may be slightly greater in + /// the case that the transfer would otherwise take the `source` balance above zero but + /// below the minimum balance. Must be greater than zero. + /// + /// Emits `Transferred` with the actual amount transferred. If this takes the source balance + /// to below the minimum for the asset, then the amount transferred is increased to take it + /// to zero. + /// + /// Weight: `O(1)` + /// Modes: Pre-existence of `dest`; Post-existence of `source`; Prior & post zombie-status + /// of `source`; Account pre-existence of `dest`. 
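Editor's note (not part of the diff): the `transfer` and `force_transfer` docs above both describe a sweep-to-zero rule — if honouring the requested amount would leave the sender above zero but below `min_balance`, the leftover dust is added to the transfer and the sender account is reaped. A minimal standalone sketch of that arithmetic, using plain integer balances rather than the pallet's generic `T::Balance` (the helper name and types are illustrative only):

    // Returns (new_sender_balance, amount_actually_moved), or None if the sender
    // cannot cover the requested amount (the pallet's `BalanceLow` case).
    fn settle_transfer(sender_balance: u128, amount: u128, min_balance: u128) -> Option<(u128, u128)> {
        let remainder = sender_balance.checked_sub(amount)?;
        if remainder < min_balance {
            // Sweep the dust: bump the transferred amount so the sender ends at exactly zero.
            Some((0, amount + remainder))
        } else {
            Some((remainder, amount))
        }
    }

    // With min_balance = 10: settle_transfer(100, 95, 10) == Some((0, 100)),
    // while settle_transfer(100, 50, 10) == Some((50, 50)).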
+ #[pallet::weight(T::WeightInfo::force_transfer())] + pub(super) fn force_transfer( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + source: ::Source, + dest: ::Source, + #[pallet::compact] amount: T::Balance, + ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; - let origin_account = (id, origin.clone()); - let origin_balance = >::get(&origin_account); - let target = T::Lookup::lookup(target)?; + + let source = T::Lookup::lookup(source)?; + let mut source_account = Account::::get(id, &source); + let mut amount = amount.min(source_account.balance); ensure!(!amount.is_zero(), Error::::AmountZero); - ensure!(origin_balance >= amount, Error::::BalanceLow); - Self::deposit_event(RawEvent::Transferred(id, origin, target.clone(), amount)); - >::insert(origin_account, origin_balance - amount); - >::mutate((id, target), |balance| *balance += amount); + let dest = T::Lookup::lookup(dest)?; + if dest == source { + return Ok(().into()) + } + + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.admin, Error::::NoPermission); + + source_account.balance -= amount; + if source_account.balance < details.min_balance { + amount += source_account.balance; + source_account.balance = Zero::zero(); + } + + Account::::try_mutate(id, &dest, |a| -> DispatchResultWithPostInfo { + let new_balance = a.balance.saturating_add(amount); + ensure!(new_balance >= details.min_balance, Error::::BalanceLow); + if a.balance.is_zero() { + a.is_zombie = Self::new_account(&dest, details)?; + } + a.balance = new_balance; + Ok(().into()) + })?; + + match source_account.balance.is_zero() { + false => { + Self::dezombify(&source, details, &mut source_account.is_zombie); + Account::::insert(id, &source, &source_account) + } + true => { + Self::dead_account(&source, details, source_account.is_zombie); + Account::::remove(id, &source); + } + } + + Self::deposit_event(Event::ForceTransferred(id, source, dest, amount)); + Ok(().into()) + }) + } + + /// Disallow further unprivileged transfers from an account. + /// + /// Origin must be Signed and the sender should be the Freezer of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// - `who`: The account to be frozen. + /// + /// Emits `Frozen`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::freeze())] + pub(super) fn freeze( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + who: ::Source + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &d.freezer, Error::::NoPermission); + let who = T::Lookup::lookup(who)?; + ensure!(Account::::contains_key(id, &who), Error::::BalanceZero); + + Account::::mutate(id, &who, |a| a.is_frozen = true); + + Self::deposit_event(Event::::Frozen(id, who)); + Ok(().into()) + } + + /// Allow unprivileged transfers from an account again. + /// + /// Origin must be Signed and the sender should be the Admin of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// - `who`: The account to be unfrozen. + /// + /// Emits `Thawed`. 
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::thaw())] + pub(super) fn thaw( + origin: OriginFor, + #[pallet::compact] + id: T::AssetId, + who: ::Source + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + + let details = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &details.admin, Error::::NoPermission); + let who = T::Lookup::lookup(who)?; + ensure!(Account::::contains_key(id, &who), Error::::BalanceZero); + + Account::::mutate(id, &who, |a| a.is_frozen = false); + + Self::deposit_event(Event::::Thawed(id, who)); + Ok(().into()) + } + + /// Disallow further unprivileged transfers for the asset class. + /// + /// Origin must be Signed and the sender should be the Freezer of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// + /// Emits `Frozen`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::freeze_asset())] + pub(super) fn freeze_asset( + origin: OriginFor, + #[pallet::compact] id: T::AssetId + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + + Asset::::try_mutate(id, |maybe_details| { + let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &d.freezer, Error::::NoPermission); + + d.is_frozen = true; + + Self::deposit_event(Event::::AssetFrozen(id)); + Ok(().into()) + }) + } + + /// Allow unprivileged transfers for the asset again. + /// + /// Origin must be Signed and the sender should be the Admin of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// + /// Emits `Thawed`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::thaw_asset())] + pub(super) fn thaw_asset( + origin: OriginFor, + #[pallet::compact] id: T::AssetId + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + + Asset::::try_mutate(id, |maybe_details| { + let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &d.admin, Error::::NoPermission); + + d.is_frozen = false; + + Self::deposit_event(Event::::AssetThawed(id)); + Ok(().into()) + }) + } + + /// Change the Owner of an asset. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// - `owner`: The new Owner of this asset. + /// + /// Emits `OwnerChanged`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::transfer_ownership())] + pub(super) fn transfer_ownership( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + owner: ::Source, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let owner = T::Lookup::lookup(owner)?; + + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); + if details.owner == owner { return Ok(().into()) } + + // Move the deposit to the new owner. + T::Currency::repatriate_reserved(&details.owner, &owner, details.deposit, Reserved)?; + + details.owner = owner.clone(); + + Self::deposit_event(Event::OwnerChanged(id, owner)); + Ok(().into()) + }) + } + + /// Change the Issuer, Admin and Freezer of an asset. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// - `issuer`: The new Issuer of this asset. + /// - `admin`: The new Admin of this asset. + /// - `freezer`: The new Freezer of this asset. + /// + /// Emits `TeamChanged`. 
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::set_team())] + pub(super) fn set_team( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + issuer: ::Source, + admin: ::Source, + freezer: ::Source, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let issuer = T::Lookup::lookup(issuer)?; + let admin = T::Lookup::lookup(admin)?; + let freezer = T::Lookup::lookup(freezer)?; + + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); + + details.issuer = issuer.clone(); + details.admin = admin.clone(); + details.freezer = freezer.clone(); + + Self::deposit_event(Event::TeamChanged(id, issuer, admin, freezer)); + Ok(().into()) + }) } - /// Destroy any assets of `id` owned by `origin`. + /// Set the maximum number of zombie accounts for an asset. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `id`. + /// + /// Funds of sender are reserved according to the formula: + /// `AssetDepositBase + AssetDepositPerZombie * max_zombies` taking into account + /// any already reserved funds. + /// + /// - `id`: The identifier of the asset to update zombie count. + /// - `max_zombies`: The new number of zombies allowed for this asset. /// - /// # - /// - `O(1)` - /// - 1 storage mutation (codec `O(1)`). - /// - 1 storage deletion (codec `O(1)`). - /// - 1 event. - /// # - #[weight = 0] - fn destroy(origin, #[compact] id: T::AssetId) { + /// Emits `MaxZombiesChanged`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::set_max_zombies())] + pub(super) fn set_max_zombies( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + #[pallet::compact] max_zombies: u32, + ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; - let balance = >::take((id, &origin)); - ensure!(!balance.is_zero(), Error::::BalanceZero); - >::mutate(id, |total_supply| *total_supply -= balance); - Self::deposit_event(RawEvent::Destroyed(id, origin, balance)); + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); + ensure!(max_zombies >= details.zombies, Error::::TooManyZombies); + + let new_deposit = T::AssetDepositPerZombie::get() + .saturating_mul(max_zombies.into()) + .saturating_add(T::AssetDepositBase::get()); + + if new_deposit > details.deposit { + T::Currency::reserve(&origin, new_deposit - details.deposit)?; + } else { + T::Currency::unreserve(&origin, details.deposit - new_deposit); + } + + details.max_zombies = max_zombies; + + Self::deposit_event(Event::MaxZombiesChanged(id, max_zombies)); + Ok(().into()) + }) } + + /// Set the metadata for an asset. + /// + /// NOTE: There is no `unset_metadata` call. Simply pass an empty name, symbol, + /// and 0 decimals to this function to remove the metadata of an asset and + /// return your deposit. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `id`. + /// + /// Funds of sender are reserved according to the formula: + /// `MetadataDepositBase + MetadataDepositPerByte * (name.len + symbol.len)` taking into + /// account any already reserved funds. + /// + /// - `id`: The identifier of the asset to update. + /// - `name`: The user friendly name of this asset. Limited in length by `StringLimit`. + /// - `symbol`: The exchange symbol for this asset. Limited in length by `StringLimit`. 
+ /// - `decimals`: The number of decimals this asset uses to represent one unit. + /// + /// Emits `MaxZombiesChanged`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::set_metadata(name.len() as u32, symbol.len() as u32))] + pub(super) fn set_metadata( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + name: Vec, + symbol: Vec, + decimals: u8, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + + ensure!(name.len() <= T::StringLimit::get() as usize, Error::::BadMetadata); + ensure!(symbol.len() <= T::StringLimit::get() as usize, Error::::BadMetadata); + + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &d.owner, Error::::NoPermission); + + Metadata::::try_mutate_exists(id, |metadata| { + let bytes_used = name.len() + symbol.len(); + let old_deposit = match metadata { + Some(m) => m.deposit, + None => Default::default() + }; + + // Metadata is being removed + if bytes_used.is_zero() && decimals.is_zero() { + T::Currency::unreserve(&origin, old_deposit); + *metadata = None; + } else { + let new_deposit = T::MetadataDepositPerByte::get() + .saturating_mul(((name.len() + symbol.len()) as u32).into()) + .saturating_add(T::MetadataDepositBase::get()); + + if new_deposit > old_deposit { + T::Currency::reserve(&origin, new_deposit - old_deposit)?; + } else { + T::Currency::unreserve(&origin, old_deposit - new_deposit); + } + + *metadata = Some(AssetMetadata { + deposit: new_deposit, + name: name.clone(), + symbol: symbol.clone(), + decimals, + }) + } + + Self::deposit_event(Event::MetadataSet(id, name, symbol, decimals)); + Ok(().into()) + }) + } + } -} -decl_event! { - pub enum Event where - ::AccountId, - ::Balance, - ::AssetId, - { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId", T::Balance = "Balance", T::AssetId = "AssetId")] + pub enum Event { + /// Some asset class was created. \[asset_id, creator, owner\] + Created(T::AssetId, T::AccountId, T::AccountId), /// Some assets were issued. \[asset_id, owner, total_supply\] - Issued(AssetId, AccountId, Balance), + Issued(T::AssetId, T::AccountId, T::Balance), /// Some assets were transferred. \[asset_id, from, to, amount\] - Transferred(AssetId, AccountId, AccountId, Balance), + Transferred(T::AssetId, T::AccountId, T::AccountId, T::Balance), /// Some assets were destroyed. \[asset_id, owner, balance\] - Destroyed(AssetId, AccountId, Balance), + Burned(T::AssetId, T::AccountId, T::Balance), + /// The management team changed \[asset_id, issuer, admin, freezer\] + TeamChanged(T::AssetId, T::AccountId, T::AccountId, T::AccountId), + /// The owner changed \[asset_id, owner\] + OwnerChanged(T::AssetId, T::AccountId), + /// Some assets was transferred by an admin. \[asset_id, from, to, amount\] + ForceTransferred(T::AssetId, T::AccountId, T::AccountId, T::Balance), + /// Some account `who` was frozen. \[asset_id, who\] + Frozen(T::AssetId, T::AccountId), + /// Some account `who` was thawed. \[asset_id, who\] + Thawed(T::AssetId, T::AccountId), + /// Some asset `asset_id` was frozen. \[asset_id\] + AssetFrozen(T::AssetId), + /// Some asset `asset_id` was thawed. \[asset_id\] + AssetThawed(T::AssetId), + /// An asset class was destroyed. + Destroyed(T::AssetId), + /// Some asset class was force-created. \[asset_id, owner\] + ForceCreated(T::AssetId, T::AccountId), + /// The maximum amount of zombies allowed has changed. 
\[asset_id, max_zombies\] + MaxZombiesChanged(T::AssetId, u32), + /// New metadata has been set for an asset. \[asset_id, name, symbol, decimals\] + MetadataSet(T::AssetId, Vec, Vec, u8), } -} -decl_error! { - pub enum Error for Module { - /// Transfer amount should be non-zero + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + #[pallet::error] + pub enum Error { + /// Transfer amount should be non-zero. AmountZero, - /// Account balance must be greater than or equal to the transfer amount + /// Account balance must be greater than or equal to the transfer amount. BalanceLow, - /// Balance should be non-zero + /// Balance should be non-zero. BalanceZero, + /// The signing account has no permission to do the operation. + NoPermission, + /// The given asset ID is unknown. + Unknown, + /// The origin account is frozen. + Frozen, + /// The asset ID is already taken. + InUse, + /// Too many zombie accounts in use. + TooManyZombies, + /// Attempt to destroy an asset class when non-zombie, reference-bearing accounts exist. + RefsLeft, + /// Invalid witness data given. + BadWitness, + /// Minimum balance should be non-zero. + MinBalanceZero, + /// A mint operation lead to an overflow. + Overflow, + /// Some internal state is broken. + BadState, + /// Invalid metadata given. + BadMetadata, } + + #[pallet::storage] + /// Details of an asset. + pub(super) type Asset = StorageMap< + _, + Blake2_128Concat, + T::AssetId, + AssetDetails> + >; + #[pallet::storage] + /// The number of units of assets held by any given account. + pub(super) type Account = StorageDoubleMap< + _, + Blake2_128Concat, + T::AssetId, + Blake2_128Concat, + T::AccountId, + AssetBalance, + ValueQuery + >; + #[pallet::storage] + /// Metadata of an asset. + pub(super) type Metadata = StorageMap< + _, + Blake2_128Concat, + T::AssetId, + AssetMetadata>, + ValueQuery + >; } -decl_storage! { - trait Store for Module as Assets { - /// The number of units of assets held by any given account. - Balances: map hasher(blake2_128_concat) (T::AssetId, T::AccountId) => T::Balance; - /// The next asset identifier up for grabs. - NextAssetId get(fn next_asset_id): T::AssetId; - /// The total unit supply of an asset. - /// - /// TWOX-NOTE: `AssetId` is trusted, so this is safe. - TotalSupply: map hasher(twox_64_concat) T::AssetId => T::Balance; - } +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] +pub struct AssetDetails< + Balance: Encode + Decode + Clone + Debug + Eq + PartialEq, + AccountId: Encode + Decode + Clone + Debug + Eq + PartialEq, + DepositBalance: Encode + Decode + Clone + Debug + Eq + PartialEq, +> { + /// Can change `owner`, `issuer`, `freezer` and `admin` accounts. + owner: AccountId, + /// Can mint tokens. + issuer: AccountId, + /// Can thaw tokens, force transfers and burn tokens from any account. + admin: AccountId, + /// Can freeze tokens. + freezer: AccountId, + /// The total supply across all accounts. + supply: Balance, + /// The balance deposited for this asset. + /// + /// This pays for the data stored here together with any virtual accounts. + deposit: DepositBalance, + /// The number of balance-holding accounts that this asset may have, excluding those that were + /// created when they had a system-level ED. + max_zombies: u32, + /// The ED for virtual accounts. + min_balance: Balance, + /// The current number of zombie accounts. + zombies: u32, + /// The total number of accounts. + accounts: u32, + /// Whether the asset is frozen for permissionless transfers. 
+ is_frozen: bool, +} + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] +pub struct AssetBalance< + Balance: Encode + Decode + Clone + Debug + Eq + PartialEq, +> { + /// The balance. + balance: Balance, + /// Whether the account is frozen. + is_frozen: bool, + /// Whether the account is a zombie. If not, then it has a reference. + is_zombie: bool, +} + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] +pub struct AssetMetadata { + /// The balance deposited for this metadata. + /// + /// This pays for the data stored in this struct. + deposit: DepositBalance, + /// The user friendly name of this asset. Limited in length by `StringLimit`. + name: Vec, + /// The ticker symbol for this asset. Limited in length by `StringLimit`. + symbol: Vec, + /// The number of decimals this asset uses to represent one unit. + decimals: u8, } // The main implementation block for the module. -impl Module { +impl Pallet { // Public immutables /// Get the asset `id` balance of `who`. pub fn balance(id: T::AssetId, who: T::AccountId) -> T::Balance { - >::get((id, who)) + Account::::get(id, who).balance } /// Get the total supply of an asset `id`. pub fn total_supply(id: T::AssetId) -> T::Balance { - >::get(id) + Asset::::get(id).map(|x| x.supply).unwrap_or_else(Zero::zero) + } + + /// Check the number of zombies allow yet for an asset. + pub fn zombie_allowance(id: T::AssetId) -> u32 { + Asset::::get(id).map(|x| x.max_zombies - x.zombies).unwrap_or_else(Zero::zero) + } + + fn new_account( + who: &T::AccountId, + d: &mut AssetDetails>, + ) -> Result { + let accounts = d.accounts.checked_add(1).ok_or(Error::::Overflow)?; + let r = Ok(if frame_system::Module::::account_exists(who) { + frame_system::Module::::inc_consumers(who).map_err(|_| Error::::BadState)?; + false + } else { + ensure!(d.zombies < d.max_zombies, Error::::TooManyZombies); + d.zombies += 1; + true + }); + d.accounts = accounts; + r + } + + /// If `who`` exists in system and it's a zombie, dezombify it. + fn dezombify( + who: &T::AccountId, + d: &mut AssetDetails>, + is_zombie: &mut bool, + ) { + if *is_zombie && frame_system::Module::::account_exists(who) { + // If the account exists, then it should have at least one provider + // so this cannot fail... but being defensive anyway. + let _ = frame_system::Module::::inc_consumers(who); + *is_zombie = false; + d.zombies = d.zombies.saturating_sub(1); + } + } + + fn dead_account( + who: &T::AccountId, + d: &mut AssetDetails>, + is_zombie: bool, + ) { + if is_zombie { + d.zombies = d.zombies.saturating_sub(1); + } else { + frame_system::Module::::dec_consumers(who); + } + d.accounts = d.accounts.saturating_sub(1); } } #[cfg(test)] mod tests { use super::*; + use crate as pallet_assets; - use frame_support::{impl_outer_origin, assert_ok, assert_noop, parameter_types, weights::Weight}; + use frame_support::{assert_ok, assert_noop, parameter_types}; use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + use pallet_balances::Error as BalancesError; - impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} - } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Assets: pallet_assets::{Module, Call, Storage, Event}, + } + ); - #[derive(Clone, Eq, PartialEq)] - pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; - type Call = (); + type Call = Call; type BlockNumber = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); - type AccountData = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + } + + parameter_types! { + pub const ExistentialDeposit: u64 = 1; } - impl Trait for Test { - type Event = (); + + impl pallet_balances::Config for Test { + type MaxLocks = (); + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + } + + parameter_types! 
{ + pub const AssetDepositBase: u64 = 1; + pub const AssetDepositPerZombie: u64 = 1; + pub const StringLimit: u32 = 50; + pub const MetadataDepositBase: u64 = 1; + pub const MetadataDepositPerByte: u64 = 1; + } + + impl Config for Test { + type Currency = Balances; + type Event = Event; type Balance = u64; type AssetId = u32; + type ForceOrigin = frame_system::EnsureRoot; + type AssetDepositBase = AssetDepositBase; + type AssetDepositPerZombie = AssetDepositPerZombie; + type StringLimit = StringLimit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type WeightInfo = (); } - type Assets = Module; - fn new_test_ext() -> sp_io::TestExternalities { + pub(crate) fn new_test_ext() -> sp_io::TestExternalities { frame_system::GenesisConfig::default().build_storage::().unwrap().into() } #[test] - fn issuing_asset_units_to_issuer_should_work() { + fn basic_minting_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); + assert_eq!(Assets::balance(0, 2), 100); + }); + } + + #[test] + fn lifecycle_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 10, 1)); + assert_eq!(Balances::reserved_balance(&1), 11); + assert!(Asset::::contains_key(0)); + + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0], vec![0], 12)); + assert_eq!(Balances::reserved_balance(&1), 14); + assert!(Metadata::::contains_key(0)); + + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 20, 100)); + assert_eq!(Account::::iter_prefix(0).count(), 2); + + assert_ok!(Assets::destroy(Origin::signed(1), 0, 100)); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert!(!Asset::::contains_key(0)); + assert!(!Metadata::::contains_key(0)); + assert_eq!(Account::::iter_prefix(0).count(), 0); + + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 10, 1)); + assert_eq!(Balances::reserved_balance(&1), 11); + assert!(Asset::::contains_key(0)); + + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0], vec![0], 12)); + assert_eq!(Balances::reserved_balance(&1), 14); + assert!(Metadata::::contains_key(0)); + + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 20, 100)); + assert_eq!(Account::::iter_prefix(0).count(), 2); + + assert_ok!(Assets::force_destroy(Origin::root(), 0, 100)); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert!(!Asset::::contains_key(0)); + assert!(!Metadata::::contains_key(0)); + assert_eq!(Account::::iter_prefix(0).count(), 0); + }); + } + + #[test] + fn destroy_with_non_zombies_should_not_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_noop!(Assets::destroy(Origin::signed(1), 0, 100), Error::::RefsLeft); + assert_noop!(Assets::force_destroy(Origin::root(), 0, 100), Error::::RefsLeft); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::destroy(Origin::signed(1), 0, 100)); + }); + } + + #[test] + fn destroy_with_bad_witness_should_not_work() { + new_test_ext().execute_with(|| { 
+ Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + assert_noop!(Assets::destroy(Origin::signed(1), 0, 0), Error::::BadWitness); + assert_noop!(Assets::force_destroy(Origin::root(), 0, 0), Error::::BadWitness); + }); + } + + #[test] + fn max_zombies_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 2, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 0, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + + assert_eq!(Assets::zombie_allowance(0), 0); + assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 100), Error::::TooManyZombies); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 50), Error::::TooManyZombies); + assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 50), Error::::TooManyZombies); + + Balances::make_free_balance_be(&3, 100); + assert_ok!(Assets::mint(Origin::signed(1), 0, 3, 100)); + + assert_ok!(Assets::transfer(Origin::signed(0), 0, 1, 100)); + assert_eq!(Assets::zombie_allowance(0), 1); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + }); + } + + #[test] + fn resetting_max_zombies_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 2, 1)); + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 3, 100)); + + assert_eq!(Assets::zombie_allowance(0), 0); + + assert_noop!(Assets::set_max_zombies(Origin::signed(1), 0, 1), Error::::TooManyZombies); + + assert_ok!(Assets::set_max_zombies(Origin::signed(1), 0, 3)); + assert_eq!(Assets::zombie_allowance(0), 1); + }); + } + + #[test] + fn dezombifying_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 10)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::zombie_allowance(0), 9); + + // introduce a bit of balance for account 2. + Balances::make_free_balance_be(&2, 100); + + // transfer 25 units, nothing changes. + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 25)); + assert_eq!(Assets::zombie_allowance(0), 9); + + // introduce a bit of balance; this will create the account. + Balances::make_free_balance_be(&1, 100); + + // now transferring 25 units will create it. + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 25)); + assert_eq!(Assets::zombie_allowance(0), 10); + }); + } + + #[test] + fn min_balance_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 10)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Asset::::get(0).unwrap().accounts, 1); + + // Cannot create a new account with a balance that is below minimum... + assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 9), Error::::BalanceLow); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 9), Error::::BalanceLow); + assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 9), Error::::BalanceLow); + + // When deducting from an account to below minimum, it should be reaped. 
+ + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 91)); + assert!(Assets::balance(0, 1).is_zero()); + assert_eq!(Assets::balance(0, 2), 100); + assert_eq!(Asset::::get(0).unwrap().accounts, 1); + + assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 2, 1, 91)); + assert!(Assets::balance(0, 2).is_zero()); + assert_eq!(Assets::balance(0, 1), 100); + assert_eq!(Asset::::get(0).unwrap().accounts, 1); + + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, 91)); + assert!(Assets::balance(0, 1).is_zero()); + assert_eq!(Asset::::get(0).unwrap().accounts, 0); }); } #[test] fn querying_total_supply_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); @@ -356,15 +1416,16 @@ mod tests { assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 19); assert_eq!(Assets::balance(0, 3), 31); - assert_ok!(Assets::destroy(Origin::signed(3), 0)); + assert_ok!(Assets::burn(Origin::signed(1), 0, 3, u64::max_value())); assert_eq!(Assets::total_supply(0), 69); }); } #[test] - fn transferring_amount_above_available_balance_should_work() { + fn transferring_amount_below_available_balance_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); @@ -372,24 +1433,119 @@ mod tests { }); } + #[test] + fn transferring_frozen_user_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::freeze(Origin::signed(1), 0, 1)); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); + assert_ok!(Assets::thaw(Origin::signed(1), 0, 1)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + }); + } + + #[test] + fn transferring_frozen_asset_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::freeze_asset(Origin::signed(1), 0)); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); + assert_ok!(Assets::thaw_asset(Origin::signed(1), 0)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + }); + } + + #[test] + fn origin_guards_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_noop!(Assets::transfer_ownership(Origin::signed(2), 0, 2), Error::::NoPermission); + assert_noop!(Assets::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); + assert_noop!(Assets::freeze(Origin::signed(2), 0, 1), Error::::NoPermission); + assert_noop!(Assets::thaw(Origin::signed(2), 0, 2), Error::::NoPermission); + assert_noop!(Assets::mint(Origin::signed(2), 0, 2, 100), Error::::NoPermission); + assert_noop!(Assets::burn(Origin::signed(2), 0, 1, 100), 
Error::::NoPermission); + assert_noop!(Assets::force_transfer(Origin::signed(2), 0, 1, 2, 100), Error::::NoPermission); + assert_noop!(Assets::set_max_zombies(Origin::signed(2), 0, 11), Error::::NoPermission); + assert_noop!(Assets::destroy(Origin::signed(2), 0, 100), Error::::NoPermission); + }); + } + + #[test] + fn transfer_owner_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 1); + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 10, 1)); + + assert_eq!(Balances::reserved_balance(&1), 11); + + assert_ok!(Assets::transfer_ownership(Origin::signed(1), 0, 2)); + assert_eq!(Balances::reserved_balance(&2), 11); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert_noop!(Assets::transfer_ownership(Origin::signed(1), 0, 1), Error::::NoPermission); + + assert_ok!(Assets::transfer_ownership(Origin::signed(2), 0, 1)); + assert_eq!(Balances::reserved_balance(&1), 11); + assert_eq!(Balances::reserved_balance(&2), 0); + }); + } + + #[test] + fn set_team_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::set_team(Origin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Assets::mint(Origin::signed(2), 0, 2, 100)); + assert_ok!(Assets::freeze(Origin::signed(4), 0, 2)); + assert_ok!(Assets::thaw(Origin::signed(3), 0, 2)); + assert_ok!(Assets::force_transfer(Origin::signed(3), 0, 2, 3, 100)); + assert_ok!(Assets::burn(Origin::signed(3), 0, 3, 100)); + }); + } + + #[test] + fn transferring_to_frozen_account_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_eq!(Assets::balance(0, 2), 100); + assert_ok!(Assets::freeze(Origin::signed(1), 0, 2)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Assets::balance(0, 2), 150); + }); + } + #[test] fn transferring_amount_more_than_available_balance_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 50); - assert_ok!(Assets::destroy(Origin::signed(1), 0)); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::max_value())); assert_eq!(Assets::balance(0, 1), 0); assert_noop!(Assets::transfer(Origin::signed(1), 0, 1, 50), Error::::BalanceLow); + assert_noop!(Assets::transfer(Origin::signed(2), 0, 1, 51), Error::::BalanceLow); }); } #[test] fn transferring_less_than_one_unit_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 0), Error::::AmountZero); }); @@ -398,27 +1554,80 @@ mod tests { #[test] fn transferring_more_units_than_total_supply_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + 
assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 101), Error::::BalanceLow); }); } #[test] - fn destroying_asset_balance_with_positive_balance_should_work() { + fn burning_asset_balance_with_positive_balance_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::destroy(Origin::signed(1), 0)); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::max_value())); + assert_eq!(Assets::balance(0, 1), 0); }); } #[test] - fn destroying_asset_balance_with_zero_balance_should_not_work() { + fn burning_asset_balance_with_zero_balance_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 2), 0); - assert_noop!(Assets::destroy(Origin::signed(2), 0), Error::::BalanceZero); + assert_noop!(Assets::burn(Origin::signed(1), 0, 2, u64::max_value()), Error::::BalanceZero); + }); + } + + #[test] + fn set_metadata_should_work() { + new_test_ext().execute_with(|| { + // Cannot add metadata to unknown asset + assert_noop!( + Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12), + Error::::Unknown, + ); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + // Cannot add metadata to unowned asset + assert_noop!( + Assets::set_metadata(Origin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12), + Error::::NoPermission, + ); + + // Cannot add oversized metadata + assert_noop!( + Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 100], vec![0u8; 10], 12), + Error::::BadMetadata, + ); + assert_noop!( + Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 100], 12), + Error::::BadMetadata, + ); + + // Successfully add metadata and take deposit + Balances::make_free_balance_be(&1, 30); + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12)); + assert_eq!(Balances::free_balance(&1), 9); + + // Update deposit + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 5], 12)); + assert_eq!(Balances::free_balance(&1), 14); + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 15], 12)); + assert_eq!(Balances::free_balance(&1), 4); + + // Cannot over-reserve + assert_noop!( + Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 20], vec![0u8; 20], 12), + BalancesError::::InsufficientBalance, + ); + + // Clear Metadata + assert!(Metadata::::contains_key(0)); + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![], vec![], 0)); + assert!(!Metadata::::contains_key(0)); }); } } diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs new file mode 100644 index 0000000000000..1858fe708e14c --- /dev/null +++ b/frame/assets/src/weights.rs @@ -0,0 +1,252 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_assets +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.1 +//! DATE: 2021-01-18, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_assets +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/assets/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_assets. +pub trait WeightInfo { + fn create() -> Weight; + fn force_create() -> Weight; + fn destroy(z: u32, ) -> Weight; + fn force_destroy(z: u32, ) -> Weight; + fn mint() -> Weight; + fn burn() -> Weight; + fn transfer() -> Weight; + fn force_transfer() -> Weight; + fn freeze() -> Weight; + fn thaw() -> Weight; + fn freeze_asset() -> Weight; + fn thaw_asset() -> Weight; + fn transfer_ownership() -> Weight; + fn set_team() -> Weight; + fn set_max_zombies() -> Weight; + fn set_metadata(n: u32, s: u32, ) -> Weight; +} + +/// Weights for pallet_assets using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn create() -> Weight { + (44_459_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn force_create() -> Weight { + (21_480_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn destroy(z: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 2_000 + .saturating_add((1_149_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(z as Weight))) + } + fn force_destroy(z: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 2_000 + .saturating_add((1_146_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(z as Weight))) + } + fn mint() -> Weight { + (32_995_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn burn() -> Weight { + (29_245_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn transfer() -> Weight { + (42_211_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn force_transfer() -> Weight { + (42_218_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn freeze() -> Weight { + (31_079_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn thaw() -> Weight { + (30_853_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn freeze_asset() -> Weight { + (22_383_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn thaw_asset() -> Weight { + (22_341_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn transfer_ownership() -> Weight { + (22_782_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn set_team() -> Weight { + (23_293_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn set_max_zombies() -> Weight { + (44_525_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn set_metadata(n: u32, s: u32, ) -> Weight { + (49_456_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 0 + .saturating_add((6_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn create() -> Weight { + (44_459_000 as Weight) + 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn force_create() -> Weight { + (21_480_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn destroy(z: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 2_000 + .saturating_add((1_149_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(z as Weight))) + } + fn force_destroy(z: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 2_000 + .saturating_add((1_146_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(z as Weight))) + } + fn mint() -> Weight { + (32_995_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn burn() -> Weight { + (29_245_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn transfer() -> Weight { + (42_211_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn force_transfer() -> Weight { + (42_218_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn freeze() -> Weight { + (31_079_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn thaw() -> Weight { + (30_853_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn freeze_asset() -> Weight { + (22_383_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn thaw_asset() -> Weight { + (22_341_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn transfer_ownership() -> Weight { + (22_782_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn set_team() -> Weight { + (23_293_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn set_max_zombies() -> Weight { + (44_525_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn set_metadata(n: u32, s: u32, ) -> Weight { + (49_456_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 0 + .saturating_add((6_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index a65632289426b..99ce41f39939a 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -1,6 +1,6 @@ 
[package] name = "pallet-atomic-swap" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,16 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } [dev-dependencies] -pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/atomic-swap/README.md b/frame/atomic-swap/README.md index 1287e90bc0da5..5dd502095d792 100644 --- a/frame/atomic-swap/README.md +++ b/frame/atomic-swap/README.md @@ -20,4 +20,4 @@ claimed within a specified duration of time, the sender may cancel it. * `claim_swap` - called by the target to approve a swap * `cancel_swap` - may be called by a sender after a specified duration -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 31f0c0f426525..e6d44d73c40d2 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ //! //! A module for atomically sending funds. //! -//! - [`atomic_swap::Trait`](./trait.Trait.html) +//! - [`atomic_swap::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -56,7 +56,7 @@ use sp_runtime::RuntimeDebug; /// Pending atomic swap operation. #[derive(Clone, Eq, PartialEq, RuntimeDebug, Encode, Decode)] -pub struct PendingSwap { +pub struct PendingSwap { /// Source of the swap. pub source: T::AccountId, /// Action of this swap. @@ -74,7 +74,7 @@ pub type HashedProof = [u8; 32]; /// succeeds with best efforts. /// - **Claim**: claim any resources reserved in the first phrase. /// - **Cancel**: cancel any resources reserved in the first phrase. -pub trait SwapAction { +pub trait SwapAction { /// Reserve the resources needed for the swap, from the given `source`. The reservation is /// allowed to fail. 
If that is the case, the the full swap creation operation is cancelled. fn reserve(&self, source: &AccountId) -> DispatchResult; @@ -115,7 +115,7 @@ impl DerefMut for BalanceSwapAction where C: Reserva } } -impl SwapAction for BalanceSwapAction +impl SwapAction for BalanceSwapAction where C: ReservableCurrency { fn reserve(&self, source: &AccountId) -> DispatchResult { @@ -136,9 +136,9 @@ impl SwapAction for BalanceSwapAction> + Into<::Event>; + type Event: From> + Into<::Event>; /// Swap action. type SwapAction: SwapAction + Parameter; /// Limit of proof size. @@ -155,7 +155,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as AtomicSwap { + trait Store for Module as AtomicSwap { pub PendingSwaps: double_map hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) HashedProof => Option>; @@ -163,7 +163,7 @@ decl_storage! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Swap already exists. AlreadyExist, /// Swap proof is invalid. @@ -186,7 +186,7 @@ decl_error! { decl_event!( /// Event of atomic swap pallet. pub enum Event where - AccountId = ::AccountId, + AccountId = ::AccountId, PendingSwap = PendingSwap, { /// Swap created. \[account, proof, swap\] @@ -201,7 +201,7 @@ decl_event!( decl_module! { /// Module definition of atomic swap pallet. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 060411c8815da..977b17f8710e3 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -1,64 +1,67 @@ #![cfg(test)] use super::*; +use crate as pallet_atomic_swap; -use frame_support::{ - impl_outer_origin, parameter_types, weights::Weight, -}; +use frame_support::parameter_types; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + AtomicSwap: pallet_atomic_swap::{Module, Call, Event}, + } +); -#[derive(Clone, Eq, Debug, PartialEq)] -pub struct Test; parameter_types! 
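To see how `SwapAction`, the `PendingSwaps` double map and the `AlreadyExist`/`InvalidProof` errors fit together, here is a dependency-free sketch of the hash-locked flow: the source reserves funds under a hashed proof, and the target can only claim by revealing a preimage that hashes to the stored value. The hash function below is a toy stand-in, not the hasher the pallet actually uses, and the account/balance types are simplified.

```rust
use std::collections::HashMap;

/// Toy 32-byte "hash" so the sketch stays dependency-free; the pallet
/// compares against a real cryptographic hash of the revealed proof.
fn toy_hash(preimage: &[u8]) -> [u8; 32] {
    let mut out = [0u8; 32];
    for (i, b) in preimage.iter().enumerate() {
        out[i % 32] ^= b.rotate_left((i % 8) as u32);
    }
    out
}

struct PendingSwap {
    source: u64,
    balance: u128,
}

#[derive(Default)]
struct AtomicSwap {
    // (target, hashed_proof) -> pending swap, mirroring the double map.
    pending: HashMap<(u64, [u8; 32]), PendingSwap>,
}

impl AtomicSwap {
    /// Phase 1: the source reserves funds under a hashed proof.
    fn create_swap(
        &mut self,
        source: u64,
        target: u64,
        hashed_proof: [u8; 32],
        balance: u128,
    ) -> Result<(), &'static str> {
        if self.pending.contains_key(&(target, hashed_proof)) {
            return Err("AlreadyExist");
        }
        self.pending.insert((target, hashed_proof), PendingSwap { source, balance });
        Ok(())
    }

    /// Phase 2: the target claims by revealing the preimage of the proof.
    fn claim_swap(&mut self, target: u64, proof: &[u8]) -> Result<u128, &'static str> {
        let hashed = toy_hash(proof);
        let swap = self.pending.remove(&(target, hashed)).ok_or("InvalidProof")?;
        // In the pallet this is where `SwapAction::claim` moves the reserved
        // funds from the source to the target.
        println!("claimed {} reserved by account {}", swap.balance, swap.source);
        Ok(swap.balance)
    }

    /// Phase 3 (alternative): the source cancels, releasing the reservation.
    fn cancel_swap(&mut self, target: u64, hashed_proof: [u8; 32]) -> Result<(), &'static str> {
        self.pending.remove(&(target, hashed_proof)).map(|_| ()).ok_or("NotExist")
    }
}

fn main() {
    let mut pallet = AtomicSwap::default();
    let proof = b"secret";
    pallet.create_swap(1, 2, toy_hash(proof), 100).unwrap();
    assert_eq!(pallet.claim_swap(2, proof).unwrap(), 100);
    // A second cancel/claim against the same proof now fails.
    assert!(pallet.cancel_swap(2, toy_hash(proof)).is_err());
}
```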
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type DustRemoval = (); - type Event = (); + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -67,14 +70,11 @@ parameter_types! { pub const ProofLimit: u32 = 1024; pub const ExpireDuration: u64 = 100; } -impl Trait for Test { - type Event = (); +impl Config for Test { + type Event = Event; type SwapAction = BalanceSwapAction; type ProofLimit = ProofLimit; } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type AtomicSwap = Module; const A: u64 = 1; const B: u64 = 2; diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index 27a579e0f9f8b..80ea164cf0f5f 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-aura" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,32 +13,29 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -sp-consensus-aura = { version 
= "0.8.0", path = "../../primitives/consensus/aura", default-features = false } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } - +pallet-session = { version = "3.0.0", default-features = false, path = "../session" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +sp-consensus-aura = { version = "0.9.0", path = "../../primitives/consensus/aura", default-features = false } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-timestamp = { version = "3.0.0", default-features = false, path = "../../primitives/timestamp" } +pallet-timestamp = { version = "3.0.0", default-features = false, path = "../timestamp" } [dev-dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-io ={ version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-io ={ version = "3.0.0", path = "../../primitives/io" } lazy_static = "1.4.0" -parking_lot = "0.10.0" +parking_lot = "0.11.1" [features] default = ["std"] std = [ "sp-application-crypto/std", "codec/std", - "sp-inherents/std", "sp-std/std", "serde", "sp-runtime/std", diff --git a/frame/aura/README.md b/frame/aura/README.md index 4f3eacbad8a06..73ed986dd734d 100644 --- a/frame/aura/README.md +++ b/frame/aura/README.md @@ -25,4 +25,4 @@ If you're interested in hacking on this module, it is useful to understand the i [`ProvideInherent`](https://docs.rs/sp-inherents/latest/sp_inherents/trait.ProvideInherent.html) and [`ProvideInherentData`](https://docs.rs/sp-inherents/latest/sp_inherents/trait.ProvideInherentData.html) to create and check inherents. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index e8e0e616bc0dc..db639a4499beb 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ //! # Aura Module //! -//! - [`aura::Trait`](./trait.Trait.html) +//! - [`aura::Config`](./trait.Config.html) //! - [`Module`](./struct.Module.html) //! //! ## Overview @@ -34,62 +34,94 @@ //! //! - [Timestamp](../pallet_timestamp/index.html): The Timestamp module is used in Aura to track //! consensus rounds (via `slots`). -//! -//! ## References -//! -//! If you're interested in hacking on this module, it is useful to understand the interaction with -//! `substrate/primitives/inherents/src/lib.rs` and, specifically, the required implementation of -//! [`ProvideInherent`](../sp_inherents/trait.ProvideInherent.html) and -//! [`ProvideInherentData`](../sp_inherents/trait.ProvideInherentData.html) to create and check inherents. 
#![cfg_attr(not(feature = "std"), no_std)] -use pallet_timestamp; - -use sp_std::{result, prelude::*}; +use sp_std::prelude::*; use codec::{Encode, Decode}; -use frame_support::{ - decl_storage, decl_module, Parameter, traits::{Get, FindAuthor}, - ConsensusEngineId, -}; +use frame_support::{Parameter, traits::{Get, FindAuthor, OneSessionHandler}, ConsensusEngineId}; use sp_runtime::{ RuntimeAppPublic, traits::{SaturatedConversion, Saturating, Zero, Member, IsMember}, generic::DigestItem, }; use sp_timestamp::OnTimestampSet; -use sp_inherents::{InherentIdentifier, InherentData, ProvideInherent, MakeFatalError}; -use sp_consensus_aura::{ - AURA_ENGINE_ID, ConsensusLog, AuthorityIndex, - inherents::{INHERENT_IDENTIFIER, AuraInherentData}, -}; +use sp_consensus_aura::{AURA_ENGINE_ID, ConsensusLog, AuthorityIndex, Slot}; mod mock; mod tests; +pub mod migrations; -pub trait Trait: pallet_timestamp::Trait { - /// The identifier type for an authority. - type AuthorityId: Member + Parameter + RuntimeAppPublic + Default; -} +pub use pallet::*; -decl_storage! { - trait Store for Module as Aura { - /// The last timestamp. - LastTimestamp get(fn last): T::Moment; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; - /// The current authorities - pub Authorities get(fn authorities): Vec; + #[pallet::config] + pub trait Config: pallet_timestamp::Config + frame_system::Config { + /// The identifier type for an authority. + type AuthorityId: Member + Parameter + RuntimeAppPublic + Default + MaybeSerializeDeserialize; } - add_extra_genesis { - config(authorities): Vec; - build(|config| Module::::initialize_authorities(&config.authorities)) + + #[pallet::pallet] + pub struct Pallet(sp_std::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_: T::BlockNumber) -> Weight { + if let Some(new_slot) = Self::current_slot_from_digests() { + let current_slot = CurrentSlot::::get(); + + assert!(current_slot < new_slot, "Slot must increase"); + CurrentSlot::::put(new_slot); + + // TODO [#3398] Generate offence report for all authorities that skipped their slots. + + T::DbWeight::get().reads_writes(2, 1) + } else { + T::DbWeight::get().reads(1) + } + } } -} -decl_module! { - pub struct Module for enum Call where origin: T::Origin { } + #[pallet::call] + impl Pallet {} + + /// The current authority set. + #[pallet::storage] + #[pallet::getter(fn authorities)] + pub(super) type Authorities = StorageValue<_, Vec, ValueQuery>; + + /// The current slot of this block. + /// + /// This will be set in `on_initialize`. + #[pallet::storage] + #[pallet::getter(fn current_slot)] + pub(super) type CurrentSlot = StorageValue<_, Slot, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub authorities: Vec, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { authorities: Vec::new() } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + Pallet::::initialize_authorities(&self.authorities); + } + } } -impl Module { +impl Pallet { fn change_authorities(new: Vec) { >::put(&new); @@ -106,13 +138,33 @@ impl Module { >::put(authorities); } } + + /// Get the current slot from the pre-runtime digests. 
+ fn current_slot_from_digests() -> Option { + let digest = frame_system::Pallet::::digest(); + let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); + for (id, mut data) in pre_runtime_digests { + if id == AURA_ENGINE_ID { + return Slot::decode(&mut data).ok(); + } + } + + None + } + + /// Determine the Aura slot-duration based on the Timestamp module configuration. + pub fn slot_duration() -> T::Moment { + // we double the minimum block-period so each author can always propose within + // the majority of its slot. + ::MinimumPeriod::get().saturating_mul(2u32.into()) + } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Pallet { type Public = T::AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl OneSessionHandler for Pallet { type Key = T::AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) @@ -128,7 +180,7 @@ impl pallet_session::OneSessionHandler for Module { // instant changes if changed { let next_authorities = validators.map(|(_, k)| k).collect::>(); - let last_authorities = >::authorities(); + let last_authorities = Self::authorities(); if next_authorities != last_authorities { Self::change_authorities(next_authorities); } @@ -145,16 +197,15 @@ impl pallet_session::OneSessionHandler for Module { } } -impl FindAuthor for Module { +impl FindAuthor for Pallet { fn find_author<'a, I>(digests: I) -> Option where I: 'a + IntoIterator { for (id, mut data) in digests.into_iter() { if id == AURA_ENGINE_ID { - if let Ok(slot_num) = u64::decode(&mut data) { - let author_index = slot_num % Self::authorities().len() as u64; - return Some(author_index as u32) - } + let slot = Slot::decode(&mut data).ok()?; + let author_index = *slot % Self::authorities().len() as u64; + return Some(author_index as u32) } } @@ -162,12 +213,12 @@ impl FindAuthor for Module { } } -/// We can not implement `FindAuthor` twice, because the compiler does not know if +/// We can not implement `FindAuthor` twice, because the compiler does not know if /// `u32 == T::AuthorityId` and thus, prevents us to implement the trait twice. #[doc(hidden)] pub struct FindAccountFromAuthorIndex(sp_std::marker::PhantomData<(T, Inner)>); -impl> FindAuthor +impl> FindAuthor for FindAccountFromAuthorIndex { fn find_author<'a, I>(digests: I) -> Option @@ -175,15 +226,15 @@ impl> FindAuthor { let i = Inner::find_author(digests)?; - let validators = >::authorities(); + let validators = >::authorities(); validators.get(i as usize).map(|k| k.clone()) } } /// Find the authority ID of the Aura authority who authored the current block. -pub type AuraAuthorId = FindAccountFromAuthorIndex>; +pub type AuraAuthorId = FindAccountFromAuthorIndex>; -impl IsMember for Module { +impl IsMember for Pallet { fn is_member(authority_id: &T::AuthorityId) -> bool { Self::authorities() .iter() @@ -191,63 +242,14 @@ impl IsMember for Module { } } -impl Module { - /// Determine the Aura slot-duration based on the Timestamp module configuration. - pub fn slot_duration() -> T::Moment { - // we double the minimum block-period so each author can always propose within - // the majority of its slot. 
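The new `current_slot_from_digests` and the adjusted `FindAuthor` are small enough to model without FRAME: scan the pre-runtime digests for the Aura engine id, decode the slot (for a plain `u64` the SCALE encoding is just the little-endian bytes), and pick the author as `slot % authorities.len()`. A dependency-free sketch under those assumptions:

```rust
const AURA_ENGINE_ID: [u8; 4] = *b"aura";

/// A pre-runtime digest item modelled as (engine id, payload). The payload
/// here is assumed to be a SCALE-encoded `u64`, i.e. 8 little-endian bytes.
fn slot_from_digests(digests: &[([u8; 4], Vec<u8>)]) -> Option<u64> {
    for (id, data) in digests {
        if *id == AURA_ENGINE_ID && data.len() >= 8 {
            let mut raw = [0u8; 8];
            raw.copy_from_slice(&data[..8]);
            return Some(u64::from_le_bytes(raw));
        }
    }
    None
}

/// `FindAuthor` for Aura boils down to `slot % authorities.len()`.
fn author_index(slot: u64, num_authorities: u64) -> u64 {
    slot % num_authorities
}

fn main() {
    let digests = vec![(AURA_ENGINE_ID, 42u64.to_le_bytes().to_vec())];
    let slot = slot_from_digests(&digests).unwrap();
    assert_eq!(slot, 42);
    // With 4 authorities, slot 42 is authored by validator index 2.
    assert_eq!(author_index(slot, 4), 2);
}
```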
- ::MinimumPeriod::get().saturating_mul(2u32.into()) - } - - fn on_timestamp_set(now: T::Moment, slot_duration: T::Moment) { - let last = Self::last(); - ::LastTimestamp::put(now); - - if last.is_zero() { - return; - } - - assert!(!slot_duration.is_zero(), "Aura slot duration cannot be zero."); - - let last_slot = last / slot_duration; - let cur_slot = now / slot_duration; - - assert!(last_slot < cur_slot, "Only one block may be authored per slot."); - - // TODO [#3398] Generate offence report for all authorities that skipped their slots. - } -} - -impl OnTimestampSet for Module { +impl OnTimestampSet for Pallet { fn on_timestamp_set(moment: T::Moment) { - Self::on_timestamp_set(moment, Self::slot_duration()) - } -} - -impl ProvideInherent for Module { - type Call = pallet_timestamp::Call; - type Error = MakeFatalError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(_: &InherentData) -> Option { - None - } - - /// Verify the validity of the inherent using the timestamp. - fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { - let timestamp = match call { - pallet_timestamp::Call::set(ref timestamp) => timestamp.clone(), - _ => return Ok(()), - }; - - let timestamp_based_slot = timestamp / Self::slot_duration(); + let slot_duration = Self::slot_duration(); + assert!(!slot_duration.is_zero(), "Aura slot duration cannot be zero."); - let seal_slot = data.aura_inherent_data()?.saturated_into(); + let timestamp_slot = moment / slot_duration; + let timestamp_slot = Slot::from(timestamp_slot.saturated_into::()); - if timestamp_based_slot == seal_slot { - Ok(()) - } else { - Err(sp_inherents::Error::from("timestamp set in block doesn't match slot in seal").into()) - } + assert!(CurrentSlot::::get() == timestamp_slot, "Timestamp slot must match `CurrentSlot`"); } } diff --git a/frame/aura/src/migrations.rs b/frame/aura/src/migrations.rs new file mode 100644 index 0000000000000..038c5b3f3f18b --- /dev/null +++ b/frame/aura/src/migrations.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Migrations for the AURA pallet. + +use frame_support::{traits::Get, weights::Weight, pallet_prelude::*}; + +struct __LastTimestamp(sp_std::marker::PhantomData); +impl frame_support::traits::StorageInstance for __LastTimestamp { + fn pallet_prefix() -> &'static str { T::PalletPrefix::get() } + const STORAGE_PREFIX: &'static str = "LastTimestamp"; +} + +type LastTimestamp = StorageValue<__LastTimestamp, (), ValueQuery>; + +pub trait RemoveLastTimestamp: super::Config { + type PalletPrefix: Get<&'static str>; +} + +/// Remove the `LastTimestamp` storage value. +/// +/// This storage value was removed and replaced by `CurrentSlot`. As we only remove this storage +/// value, it is safe to call this method multiple times. 
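The relationship between `MinimumPeriod`, `slot_duration` and the new `on_timestamp_set` assertion can be checked with plain arithmetic; the numbers below are illustrative, not any chain's actual configuration:

```rust
/// `slot_duration` is twice the timestamp pallet's `MinimumPeriod`, so each
/// author can propose within the majority of its slot.
fn slot_duration(minimum_period_ms: u64) -> u64 {
    minimum_period_ms.saturating_mul(2)
}

/// The check performed in `on_timestamp_set`: the slot derived from the
/// timestamp must match the slot taken from the block's pre-runtime digest.
fn timestamp_matches_slot(now_ms: u64, minimum_period_ms: u64, current_slot: u64) -> bool {
    let duration = slot_duration(minimum_period_ms);
    assert!(duration != 0, "Aura slot duration cannot be zero.");
    now_ms / duration == current_slot
}

fn main() {
    // MinimumPeriod = 3_000 ms -> slot duration 6_000 ms, so a timestamp of
    // 12_000 ms falls in slot 2.
    assert!(timestamp_matches_slot(12_000, 3_000, 2));
    assert!(!timestamp_matches_slot(12_000, 3_000, 3));
}
```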
+/// +/// This migration requires a type `T` that implements [`RemoveLastTimestamp`]. +pub fn remove_last_timestamp() -> Weight { + LastTimestamp::::kill(); + T::DbWeight::get().writes(1) +} diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index a3875727e47c2..a5ef12f5935f1 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,77 +19,74 @@ #![cfg(test)] -use crate::{Trait, Module, GenesisConfig}; +use crate as pallet_aura; use sp_consensus_aura::ed25519::AuthorityId; -use sp_runtime::{ - traits::IdentityLookup, Perbill, - testing::{Header, UintAuthorityId}, -}; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; -use sp_io; +use sp_runtime::{traits::IdentityLookup, testing::{Header, UintAuthorityId}}; +use frame_support::{parameter_types, traits::GenesisBuild}; use sp_core::H256; -impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Test; +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + Aura: pallet_aura::{Module, Call, Storage, Config}, + } +); parameter_types! 
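A quick way to see why `remove_last_timestamp` is safe to run more than once: killing a storage value that is already absent is a no-op, and one write is charged either way. A toy model with an in-memory map standing in for runtime storage (the key name below is made up for illustration):

```rust
use std::collections::HashMap;

/// Removing an already-removed key is a no-op, which is why the migration
/// above can safely be called multiple times.
fn remove_last_timestamp(storage: &mut HashMap<&'static str, Vec<u8>>) -> u64 {
    storage.remove("Aura::LastTimestamp");
    1 // one DB write is reported regardless
}

fn main() {
    let mut storage = HashMap::new();
    storage.insert("Aura::LastTimestamp", 1_000u64.to_le_bytes().to_vec());
    assert_eq!(remove_last_timestamp(&mut storage), 1);
    // Second run: the key is already gone, the call is still harmless.
    assert_eq!(remove_last_timestamp(&mut storage), 1);
    assert!(!storage.contains_key("Aura::LastTimestamp"));
}
```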
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); pub const MinimumPeriod: u64 = 1; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl Trait for Test { +impl pallet_aura::Config for Test { type AuthorityId = AuthorityId; } pub fn new_test_ext(authorities: Vec) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig::{ + pallet_aura::GenesisConfig::{ authorities: authorities.into_iter().map(|a| UintAuthorityId(a).to_public_key()).collect(), }.assimilate_storage(&mut t).unwrap(); t.into() } - -pub type Aura = Module; diff --git a/frame/aura/src/tests.rs b/frame/aura/src/tests.rs index ca0fc3de37638..18e14e802bd32 100644 --- a/frame/aura/src/tests.rs +++ b/frame/aura/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,7 +24,7 @@ use crate::mock::{Aura, new_test_ext}; #[test] fn initial_values() { new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { - assert_eq!(Aura::last(), 0u64); + assert_eq!(Aura::current_slot(), 0u64); assert_eq!(Aura::authorities().len(), 4); }); } diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index 0e1db74632786..43a09b01fd45d 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-authority-discovery" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,20 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-authority-discovery = { version = "2.0.0", default-features = false, path = "../../primitives/authority-discovery" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-authority-discovery = { version = "3.0.0", default-features = false, path = "../../primitives/authority-discovery" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0", features = ["historical" ], path = "../session", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-session = { version = "3.0.0", features = ["historical" ], path = "../session", default-features = false } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } [features] default = ["std"] diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index 09be533474fca..cc3f41f59ed89 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,23 +17,25 @@ //! # Authority discovery module. //! -//! This module is used by the `client/authority-discovery` to retrieve the -//! current set of authorities. +//! This module is used by the `client/authority-discovery` and by polkadot's parachain logic +//! to retrieve the current and the next set of authorities. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; -use frame_support::{decl_module, decl_storage}; +use sp_std::prelude::*; +use frame_support::{decl_module, decl_storage, traits::OneSessionHandler}; use sp_authority_discovery::AuthorityId; /// The module's config trait. -pub trait Trait: frame_system::Trait + pallet_session::Trait {} +pub trait Config: frame_system::Config + pallet_session::Config {} decl_storage! { - trait Store for Module as AuthorityDiscovery { - /// Keys of the current and next authority set. + trait Store for Module as AuthorityDiscovery { + /// Keys of the current authority set. Keys get(fn keys): Vec; + /// Keys of the next authority set. + NextKeys get(fn next_keys): Vec; } add_extra_genesis { config(keys): Vec; @@ -42,29 +44,48 @@ decl_storage! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { } } -impl Module { - /// Retrieve authority identifiers of the current and next authority set. +impl Module { + /// Retrieve authority identifiers of the current and next authority set + /// sorted and deduplicated. pub fn authorities() -> Vec { + let mut keys = Keys::get(); + let next = NextKeys::get(); + + keys.extend(next); + keys.sort(); + keys.dedup(); + + keys + } + + /// Retrieve authority identifiers of the current authority set in the original order. + pub fn current_authorities() -> Vec { Keys::get() } + /// Retrieve authority identifiers of the next authority set in the original order. + pub fn next_authorities() -> Vec { + NextKeys::get() + } + fn initialize_keys(keys: &[AuthorityId]) { if !keys.is_empty() { assert!(Keys::get().is_empty(), "Keys are already initialized!"); Keys::put(keys); + NextKeys::put(keys); } } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl OneSessionHandler for Module { type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(authorities: I) @@ -80,8 +101,10 @@ impl pallet_session::OneSessionHandler for Module { { // Remember who the authorities are for the new and next session. 
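The behavioural change in `authorities()` above is easy to state in isolation: it now returns the union of the current and next key sets, sorted and deduplicated, rather than the current set alone. A minimal standalone version of that merge, with `u64` standing in for `AuthorityId`:

```rust
/// Mirrors the new `authorities()`: union of current and next keys,
/// sorted and deduplicated.
fn merged_authorities(current: &[u64], next: &[u64]) -> Vec<u64> {
    let mut keys = current.to_vec();
    keys.extend_from_slice(next);
    keys.sort();
    keys.dedup();
    keys
}

fn main() {
    let current = [3, 1, 2];
    let next = [2, 4];
    // The overlap (validator 2) appears once, and the result is sorted rather
    // than in session order, which is why the test no longer re-sorts it.
    assert_eq!(merged_authorities(&current, &next), vec![1, 2, 3, 4]);
}
```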
if changed { - let keys = validators.chain(queued_validators).map(|x| x.1).collect::>(); - Keys::put(keys.into_iter().collect::>()); + let keys = validators.map(|x| x.1); + Keys::put(keys.collect::>()); + let next_keys = queued_validators.map(|x| x.1); + NextKeys::put(next_keys.collect::>()); } } @@ -92,8 +115,9 @@ impl pallet_session::OneSessionHandler for Module { #[cfg(test)] mod tests { + use crate as pallet_authority_discovery; use super::*; - use sp_authority_discovery::{AuthorityPair}; + use sp_authority_discovery::AuthorityPair; use sp_application_crypto::Pair; use sp_core::{crypto::key_types, H256}; use sp_io::TestExternalities; @@ -101,24 +125,35 @@ mod tests { testing::{Header, UintAuthorityId}, traits::{ConvertInto, IdentityLookup, OpaqueKeys}, Perbill, KeyTypeId, }; - use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; - - type AuthorityDiscovery = Module; + use frame_support::parameter_types; + + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, + AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config}, + } + ); - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - impl Trait for Test {} + impl Config for Test {} parameter_types! { pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); } - impl pallet_session::Trait for Test { + impl pallet_session::Config for Test { type SessionManager = (); type Keys = UintAuthorityId; type ShouldEndSession = pallet_session::PeriodicSessions; type SessionHandler = TestSessionHandler; - type Event = (); + type Event = Event; type ValidatorId = AuthorityId; type ValidatorIdOf = ConvertInto; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; @@ -126,7 +161,7 @@ mod tests { type WeightInfo = (); } - impl pallet_session::historical::Trait for Test { + impl pallet_session::historical::Config for Test { type FullIdentification = (); type FullIdentificationOf = (); } @@ -138,41 +173,33 @@ mod tests { pub const Offset: BlockNumber = 0; pub const UncleGenerations: u64 = 0; pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = BlockNumber; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AuthorityId; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type 
AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); - } - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + type SS58Prefix = (); } pub struct TestSessionHandler; @@ -228,7 +255,7 @@ mod tests { .build_storage::() .unwrap(); - GenesisConfig { + pallet_authority_discovery::GenesisConfig { keys: vec![], } .assimilate_storage::(&mut t) @@ -238,7 +265,7 @@ mod tests { let mut externalities = TestExternalities::new(t); externalities.execute_with(|| { - use pallet_session::OneSessionHandler; + use frame_support::traits::OneSessionHandler; AuthorityDiscovery::on_genesis_session( first_authorities.iter().map(|id| (id, id.clone())) @@ -254,8 +281,7 @@ mod tests { second_authorities_and_account_ids.clone().into_iter(), third_authorities_and_account_ids.clone().into_iter(), ); - let mut authorities_returned = AuthorityDiscovery::authorities(); - authorities_returned.sort(); + let authorities_returned = AuthorityDiscovery::authorities(); assert_eq!( first_authorities, authorities_returned, diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index e8b6444583821..ab48fbec8f50a 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-authorship" -version = "2.0.0" +version = "3.0.0" description = "Block and Uncle Author tracking for the FRAME" authors = ["Parity Technologies "] edition = "2018" @@ -13,18 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-authorship = { version = "2.0.0", default-features = false, path = "../../primitives/authorship" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -impl-trait-for-tuples = "0.1.3" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "3.0.0", default-features = false, path = "../../primitives/inherents" } +sp-authorship = { version = "3.0.0", default-features = false, path = "../../primitives/authorship" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +impl-trait-for-tuples = "0.2.1" [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-io ={ version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-io ={ version = "3.0.0", path = "../../primitives/io" } +serde = { version = "1.0.101" } [features] default = ["std"] diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 0a10c8849571b..3d89ab24d01cf 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,7 +34,7 @@ use sp_authorship::{INHERENT_IDENTIFIER, UnclesInherentData, InherentError}; const MAX_UNCLES: usize = 10; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// Find the author of a block. type FindAuthor: FindAuthor; /// The number of blocks back we should accept uncles. @@ -152,7 +152,7 @@ enum UncleEntryItem { } decl_storage! { - trait Store for Module as Authorship { + trait Store for Module as Authorship { /// Uncles Uncles: Vec>; /// Author of current block. @@ -164,7 +164,7 @@ decl_storage! { decl_error! { /// Error for the authorship module. - pub enum Error for Module { + pub enum Error for Module { /// The uncle parent not in the chain. InvalidUncleParent, /// Uncles already set in the block. @@ -183,7 +183,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn on_initialize(now: T::BlockNumber) -> Weight { @@ -223,7 +223,7 @@ decl_module! { } } -impl Module { +impl Module { /// Fetch the author of the block. /// /// This is safe to invoke in `on_initialize` implementations, as well @@ -337,7 +337,7 @@ impl Module { } } -impl ProvideInherent for Module { +impl ProvideInherent for Module { type Call = Call; type Error = InherentError; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; @@ -396,69 +396,70 @@ impl ProvideInherent for Module { #[cfg(test)] mod tests { + use crate as pallet_authorship; use super::*; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, generic::DigestItem, Perbill, + traits::{BlakeTwo256, IdentityLookup}, testing::Header, generic::DigestItem, }; - use frame_support::{parameter_types, impl_outer_origin, ConsensusEngineId, weights::Weight}; + use frame_support::{parameter_types, ConsensusEngineId}; - impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} - } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; - #[derive(Clone, Eq, PartialEq)] - pub struct Test; + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Authorship: pallet_authorship::{Module, Call, Storage, Inherent}, + } + ); parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const UncleGenerations: u64 = 5; } - impl Trait for Test { + impl Config for Test { type FindAuthor = AuthorGiven; type UncleGenerations = UncleGenerations; type FilterUncle = SealVerify; type EventHandler = (); } - type System = frame_system::Module; - type Authorship = Module; - const TEST_ID: ConsensusEngineId = [1, 2, 3, 4]; pub struct AuthorGiven; @@ -587,7 +588,6 @@ mod tests { &number, &hash, &Default::default(), - &Default::default(), Default::default() ); @@ -686,7 +686,6 @@ mod tests { System::initialize( &1, &Default::default(), - &Default::default(), header.digest(), Default::default(), ); diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index a210a2a8ef06a..9bde93506241d 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-babe" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,32 +13,32 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +pallet-authorship = { version = "3.0.0", default-features = false, path = "../authorship" } +pallet-session = { version = "3.0.0", default-features = false, path = 
"../session" } +pallet-timestamp = { version = "3.0.0", default-features = false, path = "../timestamp" } serde = { version = "1.0.101", optional = true } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-babe = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/babe" } -sp-consensus-vrf = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/vrf" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-babe = { version = "0.9.0", default-features = false, path = "../../primitives/consensus/babe" } +sp-consensus-vrf = { version = "0.9.0", default-features = false, path = "../../primitives/consensus/vrf" } +sp-inherents = { version = "3.0.0", default-features = false, path = "../../primitives/inherents" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-session = { version = "3.0.0", default-features = false, path = "../../primitives/session" } +sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-timestamp = { version = "3.0.0", default-features = false, path = "../../primitives/timestamp" } [dev-dependencies] -frame-benchmarking = { version = "2.0.0", path = "../benchmarking" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-offences = { version = "2.0.0", path = "../offences" } -pallet-staking = { version = "2.0.0", path = "../staking" } -pallet-staking-reward-curve = { version = "2.0.0", path = "../staking/reward-curve" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +frame-benchmarking = { version = "3.0.0", path = "../benchmarking" } +pallet-balances = { version = "3.0.0", path = "../balances" } +pallet-offences = { version = "3.0.0", path = "../offences" } +pallet-staking = { version = "3.0.0", path = "../staking" } +pallet-staking-reward-curve = { version = "3.0.0", path = "../staking/reward-curve" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/babe/src/benchmarking.rs b/frame/babe/src/benchmarking.rs index 8ee4a5913c885..087cac2ed6cc6 100644 --- a/frame/babe/src/benchmarking.rs +++ b/frame/babe/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,8 +23,6 @@ use frame_benchmarking::benchmarks; type Header = sp_runtime::generic::Header; benchmarks! { - _ { } - check_equivocation_proof { let x in 0 .. 1; diff --git a/frame/babe/src/default_weights.rs b/frame/babe/src/default_weights.rs index a0e13781961cc..c7c87b5837401 100644 --- a/frame/babe/src/default_weights.rs +++ b/frame/babe/src/default_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index 322dff92f2398..b7275d04734ea 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -35,8 +35,11 @@ //! definition. //! -use frame_support::{debug, traits::KeyOwnerProofSystem}; -use sp_consensus_babe::{EquivocationProof, SlotNumber}; +use frame_support::{ + debug, + traits::{Get, KeyOwnerProofSystem}, +}; +use sp_consensus_babe::{EquivocationProof, Slot}; use sp_runtime::transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, TransactionValidityError, ValidTransaction, @@ -48,14 +51,18 @@ use sp_staking::{ }; use sp_std::prelude::*; -use crate::{Call, Module, Trait}; +use crate::{Call, Module, Config}; /// A trait with utility methods for handling equivocation reports in BABE. /// The trait provides methods for reporting an offence triggered by a valid /// equivocation report, checking the current block author (to declare as the /// reporter), and also for creating and submitting equivocation report /// extrinsics (useful only in offchain context). -pub trait HandleEquivocation { +pub trait HandleEquivocation { + /// The longevity, in blocks, that the equivocation report is valid for. When using the staking + /// pallet this should be equal to the bonding duration (in blocks, not eras). + type ReportLongevity: Get; + /// Report an offence proved by the given reporters. fn report_offence( reporters: Vec, @@ -63,7 +70,7 @@ pub trait HandleEquivocation { ) -> Result<(), OffenceError>; /// Returns true if all of the offenders at the given time slot have already been reported. - fn is_known_offence(offenders: &[T::KeyOwnerIdentification], time_slot: &SlotNumber) -> bool; + fn is_known_offence(offenders: &[T::KeyOwnerIdentification], time_slot: &Slot) -> bool; /// Create and dispatch an equivocation report extrinsic. 
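The doc comment on `ReportLongevity` asks for the bonding duration *in blocks, not eras*; converting between the two is a matter of multiplying through the era/session/slot structure. A hedged arithmetic sketch of one plausible way a runtime might derive it (the conversion chain and the figures below are assumptions for illustration only):

```rust
/// Convert a bonding duration given in eras into a number of blocks:
/// eras -> sessions -> slots, assuming roughly one block per slot.
fn report_longevity_in_blocks(
    bonding_duration_eras: u64,
    sessions_per_era: u64,
    epoch_duration_slots: u64,
) -> u64 {
    bonding_duration_eras * sessions_per_era * epoch_duration_slots
}

fn main() {
    // Example figures only: 28 eras, 6 sessions per era, 2_400 slots per epoch.
    assert_eq!(report_longevity_in_blocks(28, 6, 2_400), 403_200);
}
```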
fn submit_unsigned_equivocation_report( @@ -75,7 +82,9 @@ pub trait HandleEquivocation { fn block_author() -> Option; } -impl HandleEquivocation for () { +impl HandleEquivocation for () { + type ReportLongevity = (); + fn report_offence( _reporters: Vec, _offence: BabeEquivocationOffence, @@ -83,7 +92,7 @@ impl HandleEquivocation for () { Ok(()) } - fn is_known_offence(_offenders: &[T::KeyOwnerIdentification], _time_slot: &SlotNumber) -> bool { + fn is_known_offence(_offenders: &[T::KeyOwnerIdentification], _time_slot: &Slot) -> bool { true } @@ -103,11 +112,11 @@ impl HandleEquivocation for () { /// using existing subsystems that are part of frame (type bounds described /// below) and will dispatch to them directly, it's only purpose is to wire all /// subsystems together. -pub struct EquivocationHandler { - _phantom: sp_std::marker::PhantomData<(I, R)>, +pub struct EquivocationHandler { + _phantom: sp_std::marker::PhantomData<(I, R, L)>, } -impl Default for EquivocationHandler { +impl Default for EquivocationHandler { fn default() -> Self { Self { _phantom: Default::default(), @@ -115,12 +124,12 @@ impl Default for EquivocationHandler { } } -impl HandleEquivocation for EquivocationHandler +impl HandleEquivocation for EquivocationHandler where // We use the authorship pallet to fetch the current block author and use // `offchain::SendTransactionTypes` for unsigned extrinsic creation and // submission. - T: Trait + pallet_authorship::Trait + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, // A system for reporting offences after valid equivocation reports are // processed. R: ReportOffence< @@ -128,7 +137,12 @@ where T::KeyOwnerIdentification, BabeEquivocationOffence, >, + // The longevity (in blocks) that the equivocation report is valid for. When using the staking + // pallet this should be the bonding duration. + L: Get, { + type ReportLongevity = L; + fn report_offence( reporters: Vec, offence: BabeEquivocationOffence, @@ -136,7 +150,7 @@ where R::report_offence(reporters, offence) } - fn is_known_offence(offenders: &[T::KeyOwnerIdentification], time_slot: &SlotNumber) -> bool { + fn is_known_offence(offenders: &[T::KeyOwnerIdentification], time_slot: &Slot) -> bool { R::is_known_offence(offenders, time_slot) } @@ -164,10 +178,10 @@ where /// A `ValidateUnsigned` implementation that restricts calls to `report_equivocation_unsigned` /// to local calls (i.e. extrinsics generated on this node) or that already in a block. This /// guarantees that only block authors can include unsigned equivocation reports. -impl frame_support::unsigned::ValidateUnsigned for Module { +impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { - if let Call::report_equivocation_unsigned(equivocation_proof, _) = call { + if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { // discard equivocation report not coming from the local node match source { TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } @@ -181,14 +195,20 @@ impl frame_support::unsigned::ValidateUnsigned for Module { } } + // check report staleness + is_known_offence::(equivocation_proof, key_owner_proof)?; + + let longevity = >::ReportLongevity::get(); + ValidTransaction::with_tag_prefix("BabeEquivocation") // We assign the maximum priority for any equivocation report. 
.priority(TransactionPriority::max_value()) // Only one equivocation report for the same offender at the same slot. .and_provides(( equivocation_proof.offender.clone(), - equivocation_proof.slot_number, + *equivocation_proof.slot, )) + .longevity(longevity) // We don't propagate this. This can never be included on a remote node. .propagate(false) .build() @@ -199,39 +219,41 @@ impl frame_support::unsigned::ValidateUnsigned for Module { fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { - // check the membership proof to extract the offender's id - let key = ( - sp_consensus_babe::KEY_TYPE, - equivocation_proof.offender.clone(), - ); - - let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone()) - .ok_or(InvalidTransaction::BadProof)?; - - // check if the offence has already been reported, - // and if so then we can discard the report. - let is_known_offence = T::HandleEquivocation::is_known_offence( - &[offender], - &equivocation_proof.slot_number, - ); - - if is_known_offence { - Err(InvalidTransaction::Stale.into()) - } else { - Ok(()) - } + is_known_offence::(equivocation_proof, key_owner_proof) } else { Err(InvalidTransaction::Call.into()) } } } +fn is_known_offence( + equivocation_proof: &EquivocationProof, + key_owner_proof: &T::KeyOwnerProof, +) -> Result<(), TransactionValidityError> { + // check the membership proof to extract the offender's id + let key = ( + sp_consensus_babe::KEY_TYPE, + equivocation_proof.offender.clone(), + ); + + let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone()) + .ok_or(InvalidTransaction::BadProof)?; + + // check if the offence has already been reported, + // and if so then we can discard the report. + if T::HandleEquivocation::is_known_offence(&[offender], &equivocation_proof.slot) { + Err(InvalidTransaction::Stale.into()) + } else { + Ok(()) + } +} + /// A BABE equivocation offence report. /// /// When a validator released two or more blocks at the same slot. pub struct BabeEquivocationOffence { - /// A babe slot number in which this incident happened. - pub slot: SlotNumber, + /// A babe slot in which this incident happened. + pub slot: Slot, /// The session index in which the incident happened. pub session_index: SessionIndex, /// The size of the validator set at the time of the offence. @@ -244,7 +266,7 @@ impl Offence for BabeEquivocationOffence { const ID: Kind = *b"babe:equivocatio"; - type TimeSlot = SlotNumber; + type TimeSlot = Slot; fn offenders(&self) -> Vec { vec![self.offender.clone()] diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 8cab698fda093..0afa0e1d0980f 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
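Several hunks above switch from a bare `u64` slot number to a `Slot` wrapper and then write `*slot` to get the inner number back (for example in the `(offender, *slot)` deduplication tag). That pattern is just a newtype with `Deref`; a minimal stand-in showing why both `*slot` and direct comparisons between `Slot`s work:

```rust
use std::ops::Deref;

/// Minimal stand-in for the consensus `Slot` wrapper around `u64`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Slot(u64);

impl Deref for Slot {
    type Target = u64;
    fn deref(&self) -> &u64 {
        &self.0
    }
}

impl From<u64> for Slot {
    fn from(n: u64) -> Self {
        Slot(n)
    }
}

fn main() {
    let current = Slot::from(7);
    let new = Slot::from(9);
    // Ordering works on the wrapper directly...
    assert!(current < new);
    // ...and `*slot` recovers the raw u64 for tags and arithmetic.
    let raw: u64 = *new;
    assert_eq!(raw, 9);
}
```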
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,7 +25,7 @@ use codec::{Decode, Encode}; use frame_support::{ decl_error, decl_module, decl_storage, dispatch::DispatchResultWithPostInfo, - traits::{FindAuthor, Get, KeyOwnerProofSystem, Randomness as RandomnessT}, + traits::{FindAuthor, Get, KeyOwnerProofSystem, OneSessionHandler, Randomness as RandomnessT}, weights::{Pays, Weight}, Parameter, }; @@ -43,7 +43,7 @@ use sp_timestamp::OnTimestampSet; use sp_consensus_babe::{ digests::{NextConfigDescriptor, NextEpochDescriptor, PreDigest}, inherents::{BabeInherentData, INHERENT_IDENTIFIER}, - BabeAuthorityWeight, ConsensusLog, EquivocationProof, SlotNumber, BABE_ENGINE_ID, + BabeAuthorityWeight, ConsensusLog, Epoch, EquivocationProof, Slot, BABE_ENGINE_ID, }; use sp_consensus_vrf::schnorrkel; use sp_inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}; @@ -62,9 +62,11 @@ mod tests; pub use equivocation::{BabeEquivocationOffence, EquivocationHandler, HandleEquivocation}; -pub trait Trait: pallet_timestamp::Trait { +pub trait Config: pallet_timestamp::Config { /// The amount of time, in slots, that each epoch should last. - type EpochDuration: Get; + /// NOTE: Currently it is not possible to change the epoch duration after + /// the chain has started. Attempting to do so will brick block production. + type EpochDuration: Get; /// The expected average block time at which BABE should be creating /// blocks. Since BABE is probabilistic it is not trivial to figure out @@ -115,7 +117,7 @@ pub trait WeightInfo { pub trait EpochChangeTrigger { /// Trigger an epoch change, if any should take place. This should be called /// during every block, after initialization is done. - fn trigger(now: T::BlockNumber); + fn trigger(now: T::BlockNumber); } /// A type signifying to BABE that an external trigger @@ -123,7 +125,7 @@ pub trait EpochChangeTrigger { pub struct ExternalTrigger; impl EpochChangeTrigger for ExternalTrigger { - fn trigger(_: T::BlockNumber) { } // nothing - trigger is external. + fn trigger(_: T::BlockNumber) { } // nothing - trigger is external. } /// A type signifying to BABE that it should perform epoch changes @@ -131,7 +133,7 @@ impl EpochChangeTrigger for ExternalTrigger { pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { - fn trigger(now: T::BlockNumber) { + fn trigger(now: T::BlockNumber) { if >::should_epoch_change(now) { let authorities = >::authorities(); let next_authorities = authorities.clone(); @@ -146,7 +148,7 @@ const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; type MaybeRandomness = Option; decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// An equivocation proof provided as part of an equivocation report is invalid. InvalidEquivocationProof, /// A key ownership proof provided as part of an equivocation report is invalid. @@ -157,7 +159,7 @@ decl_error! { } decl_storage! { - trait Store for Module as Babe { + trait Store for Module as Babe { /// Current epoch index. pub EpochIndex get(fn epoch_index): u64; @@ -166,10 +168,10 @@ decl_storage! { /// The slot at which the first epoch actually started. This is 0 /// until the first block of the chain. - pub GenesisSlot get(fn genesis_slot): u64; + pub GenesisSlot get(fn genesis_slot): Slot; /// Current slot number. - pub CurrentSlot get(fn current_slot): u64; + pub CurrentSlot get(fn current_slot): Slot; /// The epoch randomness for the *current* epoch. 
/// @@ -192,6 +194,9 @@ decl_storage! { /// Next epoch randomness. NextRandomness: schnorrkel::Randomness; + /// Next epoch authorities. + NextAuthorities: Vec<(AuthorityId, BabeAuthorityWeight)>; + /// Randomness under construction. /// /// We make a tradeoff between storage accesses and list length. @@ -230,9 +235,12 @@ decl_storage! { decl_module! { /// The BABE Pallet - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// The number of **slots** that an epoch takes. We couple sessions to /// epochs, i.e. we start a new session once the new epoch begins. + /// NOTE: Currently it is not possible to change the epoch duration + /// after the chain has started. Attempting to do so will brick block + /// production. const EpochDuration: u64 = T::EpochDuration::get(); /// The expected average block time at which BABE should be creating @@ -271,7 +279,7 @@ decl_module! { /// the equivocation proof and validate the given key ownership proof /// against the extracted offender. If both are valid, the offence will /// be reported. - #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] + #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] fn report_equivocation( origin, equivocation_proof: EquivocationProof, @@ -294,7 +302,7 @@ decl_module! { /// block authors will call it (validated in `ValidateUnsigned`), as such /// if the block author is defined it will be defined as the equivocation /// reporter. - #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] + #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] fn report_equivocation_unsigned( origin, equivocation_proof: EquivocationProof, @@ -311,7 +319,7 @@ decl_module! { } } -impl RandomnessT<::Hash> for Module { +impl RandomnessT<::Hash> for Module { /// Some BABE blocks have VRF outputs where the block producer has exactly one bit of influence, /// either they make the block or they do not make the block and thus someone else makes the /// next block. Yet, this randomness is not fresh in all BABE blocks. @@ -332,14 +340,14 @@ impl RandomnessT<::Hash> for Module { subject.reserve(VRF_OUTPUT_LENGTH); subject.extend_from_slice(&Self::randomness()[..]); - ::Hashing::hash(&subject[..]) + ::Hashing::hash(&subject[..]) } } /// A BABE public key pub type BabeKey = [u8; PUBLIC_KEY_LENGTH]; -impl FindAuthor for Module { +impl FindAuthor for Module { fn find_author<'a, I>(digests: I) -> Option where I: 'a + IntoIterator { @@ -354,7 +362,7 @@ impl FindAuthor for Module { } } -impl IsMember for Module { +impl IsMember for Module { fn is_member(authority_id: &AuthorityId) -> bool { >::authorities() .iter() @@ -362,7 +370,7 @@ impl IsMember for Module { } } -impl pallet_session::ShouldEndSession for Module { +impl pallet_session::ShouldEndSession for Module { fn should_end_session(now: T::BlockNumber) -> bool { // it might be (and it is in current implementation) that session module is calling // should_end_session() from it's own on_initialize() handler @@ -374,12 +382,12 @@ impl pallet_session::ShouldEndSession for Module { } } -impl Module { +impl Module { /// Determine the BABE slot duration based on the Timestamp module configuration. pub fn slot_duration() -> T::Moment { // we double the minimum block-period so each author can always propose within // the majority of their slot. 
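The comment above describes how the BABE slot duration is derived: the timestamp pallet's `MinimumPeriod` is doubled so each author can always propose within the majority of its slot. A minimal standalone sketch of that arithmetic, assuming plain `u64` milliseconds stand in for `T::Moment`:

```rust
/// Sketch of the slot-duration rule: double the timestamp pallet's `MinimumPeriod`.
/// The real code uses `T::Moment` and the same `saturating_mul`.
fn slot_duration(minimum_period_ms: u64) -> u64 {
    // Double the minimum block period so each author can always propose
    // within the majority of its slot.
    minimum_period_ms.saturating_mul(2)
}

fn main() {
    // e.g. a 3s minimum block period yields 6s BABE slots.
    assert_eq!(slot_duration(3_000), 6_000);
}
```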
- ::MinimumPeriod::get().saturating_mul(2u32.into()) + ::MinimumPeriod::get().saturating_mul(2u32.into()) } /// Determine whether an epoch change should take place at this block. @@ -395,7 +403,7 @@ impl Module { // so we don't rotate the epoch. now != One::one() && { let diff = CurrentSlot::get().saturating_sub(Self::current_epoch_start()); - diff >= T::EpochDuration::get() + *diff >= T::EpochDuration::get() } } @@ -416,7 +424,7 @@ impl Module { pub fn next_expected_epoch_change(now: T::BlockNumber) -> Option { let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); next_slot - .checked_sub(CurrentSlot::get()) + .checked_sub(*CurrentSlot::get()) .map(|slots_remaining| { // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); @@ -464,6 +472,9 @@ impl Module { let randomness = Self::randomness_change_epoch(next_epoch_index); Randomness::put(randomness); + // Update the next epoch authorities. + NextAuthorities::put(&next_authorities); + // After we update the current epoch, we signal the *next* epoch change // so that nodes can track changes. let next_randomness = NextRandomness::get(); @@ -479,11 +490,52 @@ impl Module { } } - // finds the start slot of the current epoch. only guaranteed to - // give correct results after `do_initialize` of the first block - // in the chain (as its result is based off of `GenesisSlot`). - pub fn current_epoch_start() -> SlotNumber { - (EpochIndex::get() * T::EpochDuration::get()) + GenesisSlot::get() + /// Finds the start slot of the current epoch. only guaranteed to + /// give correct results after `do_initialize` of the first block + /// in the chain (as its result is based off of `GenesisSlot`). + pub fn current_epoch_start() -> Slot { + Self::epoch_start(EpochIndex::get()) + } + + /// Produces information about the current epoch. + pub fn current_epoch() -> Epoch { + Epoch { + epoch_index: EpochIndex::get(), + start_slot: Self::current_epoch_start(), + duration: T::EpochDuration::get(), + authorities: Self::authorities(), + randomness: Self::randomness(), + } + } + + /// Produces information about the next epoch (which was already previously + /// announced). + pub fn next_epoch() -> Epoch { + let next_epoch_index = EpochIndex::get().checked_add(1).expect( + "epoch index is u64; it is always only incremented by one; \ + if u64 is not enough we should crash for safety; qed.", + ); + + Epoch { + epoch_index: next_epoch_index, + start_slot: Self::epoch_start(next_epoch_index), + duration: T::EpochDuration::get(), + authorities: NextAuthorities::get(), + randomness: NextRandomness::get(), + } + } + + fn epoch_start(epoch_index: u64) -> Slot { + // (epoch_index * epoch_duration) + genesis_slot + + const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ + if u64 is not enough we should crash for safety; qed."; + + let epoch_start = epoch_index + .checked_mul(T::EpochDuration::get()) + .expect(PROOF); + + epoch_start.checked_add(*GenesisSlot::get()).expect(PROOF).into() } fn deposit_consensus(new: U) { @@ -531,9 +583,9 @@ impl Module { // on the first non-zero block (i.e. block #1) // this is where the first epoch (epoch #0) actually starts. // we need to adjust internal storage accordingly. 
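The new `epoch_start` helper above is simply `epoch_index * epoch_duration + genesis_slot`, with checked arithmetic that deliberately panics if the `u64` slot space ever overflows. A standalone sketch of the same computation on plain `u64` slots instead of the `Slot` wrapper:

```rust
/// Sketch of the epoch-start arithmetic; overflow is treated as fatal,
/// mirroring the pallet's `expect(PROOF)` calls.
fn epoch_start(epoch_index: u64, epoch_duration: u64, genesis_slot: u64) -> u64 {
    epoch_index
        .checked_mul(epoch_duration)
        .and_then(|start| start.checked_add(genesis_slot))
        .expect("slot arithmetic overflowed u64; crash for safety")
}

fn main() {
    // With 3-slot epochs and a genesis slot of 1 (as in the mock), epoch 3 begins at
    // slot 10, matching the expectation in `can_fetch_current_and_next_epoch_data`.
    assert_eq!(epoch_start(3, 3, 1), 10);
}
```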
- if GenesisSlot::get() == 0 { - GenesisSlot::put(digest.slot_number()); - debug_assert_ne!(GenesisSlot::get(), 0); + if *GenesisSlot::get() == 0 { + GenesisSlot::put(digest.slot()); + debug_assert_ne!(*GenesisSlot::get(), 0); // deposit a log because this is the first block in epoch #0 // we use the same values as genesis because we haven't collected any @@ -547,11 +599,11 @@ impl Module { } // the slot number of the current block being initialized - let current_slot = digest.slot_number(); + let current_slot = digest.slot(); // how many slots were skipped between current and last block let lateness = current_slot.saturating_sub(CurrentSlot::get() + 1); - let lateness = T::BlockNumber::from(lateness as u32); + let lateness = T::BlockNumber::from(*lateness as u32); Lateness::::put(lateness); CurrentSlot::put(current_slot); @@ -622,6 +674,7 @@ impl Module { if !authorities.is_empty() { assert!(Authorities::get().is_empty(), "Authorities are already initialized!"); Authorities::put(authorities); + NextAuthorities::put(authorities); } } @@ -631,7 +684,7 @@ impl Module { key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { let offender = equivocation_proof.offender.clone(); - let slot_number = equivocation_proof.slot_number; + let slot = equivocation_proof.slot; // validate the equivocation proof if !sp_consensus_babe::check_equivocation_proof(equivocation_proof) { @@ -641,7 +694,7 @@ impl Module { let validator_set_count = key_owner_proof.validator_count(); let session_index = key_owner_proof.session(); - let epoch_index = (slot_number.saturating_sub(GenesisSlot::get()) / T::EpochDuration::get()) + let epoch_index = (*slot.saturating_sub(GenesisSlot::get()) / T::EpochDuration::get()) .saturated_into::(); // check that the slot number is consistent with the session index @@ -656,7 +709,7 @@ impl Module { .ok_or(Error::::InvalidKeyOwnershipProof)?; let offence = BabeEquivocationOffence { - slot: slot_number, + slot, validator_set_count, offender, session_index, @@ -690,11 +743,11 @@ impl Module { } } -impl OnTimestampSet for Module { +impl OnTimestampSet for Module { fn on_timestamp_set(_moment: T::Moment) { } } -impl frame_support::traits::EstimateNextSessionRotation for Module { +impl frame_support::traits::EstimateNextSessionRotation for Module { fn estimate_next_session_rotation(now: T::BlockNumber) -> Option { Self::next_expected_epoch_change(now) } @@ -706,17 +759,17 @@ impl frame_support::traits::EstimateNextSessionRotation frame_support::traits::Lateness for Module { +impl frame_support::traits::Lateness for Module { fn lateness(&self) -> T::BlockNumber { Self::lateness() } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl OneSessionHandler for Module { type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) @@ -766,7 +819,7 @@ fn compute_randomness( sp_io::hashing::blake2_256(&s) } -impl ProvideInherent for Module { +impl ProvideInherent for Module { type Call = pallet_timestamp::Call; type Error = MakeFatalError; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; @@ -784,7 +837,7 @@ impl ProvideInherent for Module { let timestamp_based_slot = (timestamp / Self::slot_duration()).saturated_into::(); let seal_slot = data.babe_inherent_data()?; - if timestamp_based_slot == seal_slot { + if timestamp_based_slot == *seal_slot { Ok(()) } else { Err(sp_inherents::Error::from("timestamp set in 
block doesn't match slot in seal").into()) diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 9f00a4ddfc3cd..e3d2eb19ef264 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ //! Test utilities use codec::Encode; -use super::{Trait, Module, CurrentSlot}; +use crate::{self as pallet_babe, Config, CurrentSlot}; use sp_runtime::{ Perbill, impl_opaque_keys, curve::PiecewiseLinear, @@ -27,46 +27,52 @@ use sp_runtime::{ }; use frame_system::InitKind; use frame_support::{ - impl_outer_dispatch, impl_outer_origin, parameter_types, StorageValue, + parameter_types, StorageValue, traits::{KeyOwnerProofSystem, OnInitialize}, weights::Weight, }; use sp_io; use sp_core::{H256, U256, crypto::{IsWrappedBy, KeyTypeId, Pair}}; -use sp_consensus_babe::{AuthorityId, AuthorityPair, SlotNumber}; +use sp_consensus_babe::{AuthorityId, AuthorityPair, Slot}; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; use sp_staking::SessionIndex; use pallet_staking::EraIndex; - -impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} -} - -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - babe::Babe, - staking::Staking, - } -} +use pallet_session::historical as pallet_session_historical; type DummyValidatorId = u64; -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Test; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Historical: pallet_session_historical::{Module}, + Offences: pallet_offences::{Module, Call, Storage, Event}, + Babe: pallet_babe::{Module, Call, Storage, Config, Inherent, ValidateUnsigned}, + Staking: pallet_staking::{Module, Call, Storage, Config, Event}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, + Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + } +); parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const EpochDuration: u64 = 3; - pub const ExpectedBlockTime: u64 = 1; pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(16); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -77,20 +83,14 @@ impl frame_system::Trait for Test { type AccountId = DummyValidatorId; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } impl frame_system::offchain::SendTransactionTypes for Test @@ -107,9 +107,9 @@ impl_opaque_keys! { } } -impl pallet_session::Trait for Test { - type Event = (); - type ValidatorId = ::AccountId; +impl pallet_session::Config for Test { + type Event = Event; + type ValidatorId = ::AccountId; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; type NextSessionRotation = Babe; @@ -120,7 +120,7 @@ impl pallet_session::Trait for Test { type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -129,7 +129,7 @@ parameter_types! { pub const UncleGenerations: u64 = 0; } -impl pallet_authorship::Trait for Test { +impl pallet_authorship::Config for Test { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -140,7 +140,7 @@ parameter_types! { pub const MinimumPeriod: u64 = 1; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; @@ -151,11 +151,11 @@ parameter_types! { pub const ExistentialDeposit: u128 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u128; type DustRemoval = (); - type Event = (); + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -183,10 +183,10 @@ parameter_types! { pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; } -impl pallet_staking::Trait for Test { +impl pallet_staking::Config for Test { type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; - type Event = (); + type Event = Event; type Currency = Balances; type Slash = (); type Reward = (); @@ -209,17 +209,25 @@ impl pallet_staking::Trait for Test { } parameter_types! 
{ - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) + * BlockWeights::get().max_block; } -impl pallet_offences::Trait for Test { - type Event = (); +impl pallet_offences::Config for Test { + type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; } -impl Trait for Test { +parameter_types! { + pub const EpochDuration: u64 = 3; + pub const ExpectedBlockTime: u64 = 1; + pub const ReportLongevity: u64 = + BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * EpochDuration::get(); +} + +impl Config for Test { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; type EpochChangeTrigger = crate::ExternalTrigger; @@ -234,19 +242,12 @@ impl Trait for Test { AuthorityId, )>>::IdentificationTuple; - type HandleEquivocation = super::EquivocationHandler; + type HandleEquivocation = + super::EquivocationHandler; + type WeightInfo = (); } -pub type Balances = pallet_balances::Module; -pub type Historical = pallet_session::historical::Module; -pub type Offences = pallet_offences::Module; -pub type Session = pallet_session::Module; -pub type Staking = pallet_staking::Module; -pub type System = frame_system::Module; -pub type Timestamp = pallet_timestamp::Module; -pub type Babe = Module; - pub fn go_to_block(n: u64, s: u64) { use frame_support::traits::OnFinalize; @@ -261,14 +262,14 @@ pub fn go_to_block(n: u64, s: u64) { System::parent_hash() }; - let pre_digest = make_secondary_plain_pre_digest(0, s); + let pre_digest = make_secondary_plain_pre_digest(0, s.into()); - System::initialize(&n, &parent_hash, &Default::default(), &pre_digest, InitKind::Full); + System::initialize(&n, &parent_hash, &pre_digest, InitKind::Full); System::set_block_number(n); Timestamp::set_timestamp(n); if s > 1 { - CurrentSlot::put(s); + CurrentSlot::put(Slot::from(s)); } System::on_initialize(n); @@ -278,8 +279,8 @@ pub fn go_to_block(n: u64, s: u64) { /// Slots will grow accordingly to blocks pub fn progress_to_block(n: u64) { - let mut slot = Babe::current_slot() + 1; - for i in System::block_number()+1..=n { + let mut slot = u64::from(Babe::current_slot()) + 1; + for i in System::block_number() + 1 ..= n { go_to_block(i, slot); slot += 1; } @@ -298,16 +299,16 @@ pub fn start_era(era_index: EraIndex) { assert_eq!(Staking::current_era(), Some(era_index)); } -pub fn make_pre_digest( +pub fn make_primary_pre_digest( authority_index: sp_consensus_babe::AuthorityIndex, - slot_number: sp_consensus_babe::SlotNumber, + slot: sp_consensus_babe::Slot, vrf_output: VRFOutput, vrf_proof: VRFProof, ) -> Digest { let digest_data = sp_consensus_babe::digests::PreDigest::Primary( sp_consensus_babe::digests::PrimaryPreDigest { authority_index, - slot_number, + slot, vrf_output, vrf_proof, } @@ -318,12 +319,12 @@ pub fn make_pre_digest( pub fn make_secondary_plain_pre_digest( authority_index: sp_consensus_babe::AuthorityIndex, - slot_number: sp_consensus_babe::SlotNumber, + slot: sp_consensus_babe::Slot, ) -> Digest { let digest_data = sp_consensus_babe::digests::PreDigest::SecondaryPlain( sp_consensus_babe::digests::SecondaryPlainPreDigest { authority_index, - slot_number, + slot, } ); let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode()); @@ -332,14 +333,14 @@ pub fn make_secondary_plain_pre_digest( pub fn make_secondary_vrf_pre_digest( 
authority_index: sp_consensus_babe::AuthorityIndex, - slot_number: sp_consensus_babe::SlotNumber, + slot: sp_consensus_babe::Slot, vrf_output: VRFOutput, vrf_proof: VRFProof, ) -> Digest { let digest_data = sp_consensus_babe::digests::PreDigest::SecondaryVRF( sp_consensus_babe::digests::SecondaryVRFPreDigest { authority_index, - slot_number, + slot, vrf_output, vrf_proof, } @@ -349,11 +350,11 @@ pub fn make_secondary_vrf_pre_digest( } pub fn make_vrf_output( - slot_number: u64, + slot: Slot, pair: &sp_consensus_babe::AuthorityPair ) -> (VRFOutput, VRFProof, [u8; 32]) { let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); - let transcript = sp_consensus_babe::make_transcript(&Babe::randomness(), slot_number, 0); + let transcript = sp_consensus_babe::make_transcript(&Babe::randomness(), slot, 0); let vrf_inout = pair.vrf_sign(transcript); let vrf_randomness: sp_consensus_vrf::schnorrkel::Randomness = vrf_inout.0 .make_bytes::<[u8; 32]>(&sp_consensus_babe::BABE_VRF_INOUT_CONTEXT); @@ -382,6 +383,14 @@ pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::Tes .build_storage::() .unwrap(); + let balances: Vec<_> = (0..authorities.len()) + .map(|i| (i as u64, 10_000_000)) + .collect(); + + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut t) + .unwrap(); + // stashes are the index. let session_keys: Vec<_> = authorities .iter() @@ -397,6 +406,12 @@ pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::Tes }) .collect(); + // NOTE: this will initialize the babe authorities + // through OneSessionHandler::on_genesis_session + pallet_session::GenesisConfig:: { keys: session_keys } + .assimilate_storage(&mut t) + .unwrap(); + // controllers are the index + 1000 let stakers: Vec<_> = (0..authorities.len()) .map(|i| { @@ -409,20 +424,6 @@ pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::Tes }) .collect(); - let balances: Vec<_> = (0..authorities.len()) - .map(|i| (i as u64, 10_000_000)) - .collect(); - - // NOTE: this will initialize the babe authorities - // through OneSessionHandler::on_genesis_session - pallet_session::GenesisConfig:: { keys: session_keys } - .assimilate_storage(&mut t) - .unwrap(); - - pallet_balances::GenesisConfig:: { balances } - .assimilate_storage(&mut t) - .unwrap(); - let staking_config = pallet_staking::GenesisConfig:: { stakers, validator_count: 8, @@ -441,7 +442,7 @@ pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::Tes pub fn generate_equivocation_proof( offender_authority_index: u32, offender_authority_pair: &AuthorityPair, - slot_number: SlotNumber, + slot: Slot, ) -> sp_consensus_babe::EquivocationProof
<Header>
{ use sp_consensus_babe::digests::CompatibleDigestItem; @@ -450,8 +451,8 @@ pub fn generate_equivocation_proof( let make_header = || { let parent_hash = System::parent_hash(); - let pre_digest = make_secondary_plain_pre_digest(offender_authority_index, slot_number); - System::initialize(¤t_block, &parent_hash, &Default::default(), &pre_digest, InitKind::Full); + let pre_digest = make_secondary_plain_pre_digest(offender_authority_index, slot); + System::initialize(¤t_block, &parent_hash, &pre_digest, InitKind::Full); System::set_block_number(current_block); Timestamp::set_timestamp(current_block); System::finalize() @@ -475,10 +476,10 @@ pub fn generate_equivocation_proof( seal_header(&mut h2); // restore previous runtime state - go_to_block(current_block, current_slot); + go_to_block(current_block, *current_slot); sp_consensus_babe::EquivocationProof { - slot_number, + slot, offender: offender_authority_pair.public(), first_header: h1, second_header: h2, diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 06bf84614ca6d..8576389af31ff 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,7 +25,7 @@ use frame_support::{ }; use mock::*; use pallet_session::ShouldEndSession; -use sp_consensus_babe::AllowedSlots; +use sp_consensus_babe::{AllowedSlots, Slot}; use sp_core::crypto::Pair; const EMPTY_RANDOMNESS: [u8; 32] = [ @@ -62,22 +62,21 @@ fn first_block_epoch_zero_start() { let (pairs, mut ext) = new_test_ext_with_pairs(4); ext.execute_with(|| { - let genesis_slot = 100; + let genesis_slot = Slot::from(100); let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); let first_vrf = vrf_output; - let pre_digest = make_pre_digest( + let pre_digest = make_primary_pre_digest( 0, genesis_slot, first_vrf.clone(), vrf_proof, ); - assert_eq!(Babe::genesis_slot(), 0); + assert_eq!(Babe::genesis_slot(), Slot::from(0)); System::initialize( &1, &Default::default(), - &Default::default(), &pre_digest, Default::default(), ); @@ -121,14 +120,13 @@ fn author_vrf_output_for_primary() { let (pairs, mut ext) = new_test_ext_with_pairs(1); ext.execute_with(|| { - let genesis_slot = 10; + let genesis_slot = Slot::from(10); let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); - let primary_pre_digest = make_pre_digest(0, genesis_slot, vrf_output, vrf_proof); + let primary_pre_digest = make_primary_pre_digest(0, genesis_slot, vrf_output, vrf_proof); System::initialize( &1, &Default::default(), - &Default::default(), &primary_pre_digest, Default::default(), ); @@ -148,14 +146,13 @@ fn author_vrf_output_for_secondary_vrf() { let (pairs, mut ext) = new_test_ext_with_pairs(1); ext.execute_with(|| { - let genesis_slot = 10; + let genesis_slot = Slot::from(10); let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); let secondary_vrf_pre_digest = make_secondary_vrf_pre_digest(0, genesis_slot, vrf_output, vrf_proof); System::initialize( &1, &Default::default(), - &Default::default(), &secondary_vrf_pre_digest, Default::default(), ); @@ -173,13 +170,12 @@ fn author_vrf_output_for_secondary_vrf() { #[test] fn no_author_vrf_output_for_secondary_plain() { new_test_ext(1).execute_with(|| { - let genesis_slot = 10; + let 
genesis_slot = Slot::from(10); let secondary_plain_pre_digest = make_secondary_plain_pre_digest(0, genesis_slot); System::initialize( &1, &Default::default(), - &Default::default(), &secondary_plain_pre_digest, Default::default(), ); @@ -206,20 +202,20 @@ fn authority_index() { #[test] fn can_predict_next_epoch_change() { new_test_ext(1).execute_with(|| { - assert_eq!(::EpochDuration::get(), 3); + assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); - assert_eq!(Babe::genesis_slot(), 6); - assert_eq!(Babe::current_slot(), 6); + assert_eq!(*Babe::genesis_slot(), 6); + assert_eq!(*Babe::current_slot(), 6); assert_eq!(Babe::epoch_index(), 0); progress_to_block(5); assert_eq!(Babe::epoch_index(), 5 / 3); - assert_eq!(Babe::current_slot(), 10); + assert_eq!(*Babe::current_slot(), 10); // next epoch change will be at - assert_eq!(Babe::current_epoch_start(), 9); // next change will be 12, 2 slots from now + assert_eq!(*Babe::current_epoch_start(), 9); // next change will be 12, 2 slots from now assert_eq!(Babe::next_expected_epoch_change(System::block_number()), Some(5 + 2)); }) } @@ -227,11 +223,11 @@ fn can_predict_next_epoch_change() { #[test] fn can_enact_next_config() { new_test_ext(1).execute_with(|| { - assert_eq!(::EpochDuration::get(), 3); + assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); - assert_eq!(Babe::genesis_slot(), 6); - assert_eq!(Babe::current_slot(), 6); + assert_eq!(*Babe::genesis_slot(), 6); + assert_eq!(*Babe::current_slot(), 6); assert_eq!(Babe::epoch_index(), 0); go_to_block(2, 7); @@ -256,6 +252,39 @@ fn can_enact_next_config() { }); } +#[test] +fn can_fetch_current_and_next_epoch_data() { + new_test_ext(5).execute_with(|| { + // genesis authorities should be used for the first and second epoch + assert_eq!( + Babe::current_epoch().authorities, + Babe::next_epoch().authorities, + ); + + // 1 era = 3 epochs + // 1 epoch = 3 slots + // Eras start from 0. + // Therefore at era 1 we should be starting epoch 3 with slot 10. 
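The `can_predict_next_epoch_change` test above exercises `next_expected_epoch_change`: with 3-slot epochs, a genesis slot of 6 and a current slot of 10, the current epoch started at slot 9, so the next change falls at slot 12, two slots (and, at one slot per block, two blocks) after block 5. A standalone sketch of that best-effort estimate, with the 1:1 slot-to-block assumption the pallet itself makes:

```rust
/// Sketch of `next_expected_epoch_change`: estimate the block of the next epoch start,
/// assuming one slot per block (a best-effort guess, as the pallet's comment notes).
fn next_expected_epoch_change(
    now_block: u64,
    current_slot: u64,
    current_epoch_start: u64,
    epoch_duration: u64,
) -> Option<u64> {
    let next_epoch_slot = current_epoch_start.saturating_add(epoch_duration);
    next_epoch_slot
        .checked_sub(current_slot)
        .map(|slots_remaining| now_block + slots_remaining)
}

fn main() {
    // Mirrors the test: epoch duration 3, current slot 10, epoch started at slot 9,
    // so the change is expected 2 blocks after block 5.
    assert_eq!(next_expected_epoch_change(5, 10, 9, 3), Some(7));
}
```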
+ start_era(1); + + let current_epoch = Babe::current_epoch(); + assert_eq!(current_epoch.epoch_index, 3); + assert_eq!(*current_epoch.start_slot, 10); + assert_eq!(current_epoch.authorities.len(), 5); + + let next_epoch = Babe::next_epoch(); + assert_eq!(next_epoch.epoch_index, 4); + assert_eq!(*next_epoch.start_slot, 13); + assert_eq!(next_epoch.authorities.len(), 5); + + // the on-chain randomness should always change across epochs + assert!(current_epoch.randomness != next_epoch.randomness); + + // but in this case the authorities stay the same + assert!(current_epoch.authorities == next_epoch.authorities); + }); +} + #[test] fn report_equivocation_current_session_works() { let (pairs, mut ext) = new_test_ext_with_pairs(3); @@ -543,7 +572,7 @@ fn report_equivocation_invalid_equivocation_proof() { &offending_authority_pair, CurrentSlot::get(), ); - equivocation_proof.slot_number = 0; + equivocation_proof.slot = Slot::from(0); assert_invalid_equivocation(equivocation_proof.clone()); // different slot numbers in headers @@ -582,8 +611,8 @@ fn report_equivocation_invalid_equivocation_proof() { #[test] fn report_equivocation_validate_unsigned_prevents_duplicates() { use sp_runtime::transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, - TransactionValidity, ValidTransaction, + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + ValidTransaction, }; let (pairs, mut ext) = new_test_ext_with_pairs(3); @@ -635,7 +664,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { priority: TransactionPriority::max_value(), requires: vec![], provides: vec![("BabeEquivocation", tx_tag).encode()], - longevity: TransactionLongevity::max_value(), + longevity: ReportLongevity::get(), propagate: false, }) ); @@ -647,7 +676,16 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof) .unwrap(); - // the report should now be considered stale and the transaction is invalid + // the report should now be considered stale and the transaction is invalid. + // the check for staleness should be done on both `validate_unsigned` and on `pre_dispatch` + assert_err!( + ::validate_unsigned( + TransactionSource::Local, + &inner, + ), + InvalidTransaction::Stale, + ); + assert_err!( ::pre_dispatch(&inner), InvalidTransaction::Stale, @@ -661,7 +699,7 @@ fn report_equivocation_has_valid_weight() { // but there's a lower bound of 100 validators. assert!( (1..=100) - .map(::WeightInfo::report_equivocation) + .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) .all(|w| w[0] == w[1]) @@ -671,7 +709,7 @@ fn report_equivocation_has_valid_weight() { // with every extra validator. 
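The `report_equivocation_has_valid_weight` test around this point asserts two properties of `WeightInfo::report_equivocation`: the weight is flat up to the 100-validator lower bound and strictly increasing beyond it. A standalone sketch of that style of check against a toy weight function (the `toy_weight` formula is purely illustrative, not the benchmarked weight):

```rust
/// Purely illustrative weight function: flat up to 100 validators, growing afterwards.
fn toy_weight(validator_count: u64) -> u64 {
    1_000_000 + validator_count.max(100) * 10_000
}

fn main() {
    // Flat below the 100-validator floor...
    assert!((1..=100)
        .map(toy_weight)
        .collect::<Vec<_>>()
        .windows(2)
        .all(|w| w[0] == w[1]));
    // ...and strictly increasing above it, as the test asserts for the real weights.
    assert!((100..=1000)
        .map(toy_weight)
        .collect::<Vec<_>>()
        .windows(2)
        .all(|w| w[0] < w[1]));
}
```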
assert!( (100..=1000) - .map(::WeightInfo::report_equivocation) + .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) .all(|w| w[0] < w[1]) diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index 21c8abbc24a6c..39b7fda77fef7 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-balances" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-transaction-payment = { version = "2.0.0", path = "../transaction-payment" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +pallet-transaction-payment = { version = "3.0.0", path = "../transaction-payment" } [features] default = ["std"] diff --git a/frame/balances/README.md b/frame/balances/README.md index 4104fdc641975..cbbfea75e6848 100644 --- a/frame/balances/README.md +++ b/frame/balances/README.md @@ -62,7 +62,7 @@ dealing with accounts that allow liquidity restrictions. - [`Imbalance`](https://docs.rs/frame-support/latest/frame_support/traits/trait.Imbalance.html): Functions for handling imbalances between total issuance in the system and account balances. Must be used when a function creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee). -- [`IsDeadAccount`](https://docs.rs/frame-system/latest/frame_system/trait.IsDeadAccount.html): Determiner to say whether a +- [`IsDeadAccount`](https://docs.rs/frame-support/latest/frame_support/traits/trait.IsDeadAccount.html): Determiner to say whether a given account is unused. 
## Interface @@ -83,8 +83,8 @@ The Contract module uses the `Currency` trait to handle gas payment, and its typ ```rust use frame_support::traits::Currency; -pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; ``` @@ -93,11 +93,11 @@ The Staking module uses the `LockableCurrency` trait to lock a stash account's f ```rust use frame_support::traits::{WithdrawReasons, LockableCurrency}; use sp_runtime::traits::Bounded; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { type Currency: LockableCurrency; } -fn update_ledger( +fn update_ledger( controller: &T::AccountId, ledger: &StakingLedger ) { @@ -117,6 +117,6 @@ The Balances module depends on the [`GenesisConfig`](https://docs.rs/pallet-bala ## Assumptions -* Total issued balanced of all accounts should be less than `Trait::Balance::max_value()`. +* Total issued balanced of all accounts should be less than `Config::Balance::max_value()`. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 078d74006ba2f..53cf273d850de 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -33,8 +33,6 @@ const ED_MULTIPLIER: u32 = 10; benchmarks! { - _ { } - // Benchmark `transfer` extrinsic with the worst possible conditions: // * Transfer will kill the sender account. // * Transfer will create the recipient account. diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 6c9d3adfedaa7..e3eb9478b6493 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,17 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Balances Module +//! # Balances Pallet //! -//! The Balances module provides functionality for handling accounts and balances. +//! The Balances pallet provides functionality for handling accounts and balances. //! -//! - [`balances::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Pallet`] //! //! ## Overview //! -//! The Balances module provides functions for: +//! The Balances pallet provides functions for: //! //! - Getting and setting free balances. //! - Retrieving total, reserved and unreserved balances. @@ -43,7 +43,7 @@ //! fall below this, then the account is said to be dead; and it loses its functionality as well as any //! prior history and all information on it is removed from the chain's state. //! No account should ever have a total balance that is strictly between 0 and the existential -//! deposit (exclusive). 
If this ever happens, it indicates either a bug in this module or an +//! deposit (exclusive). If this ever happens, it indicates either a bug in this pallet or an //! erroneous raw mutation of storage. //! //! - **Total Issuance:** The total number of units in existence in a system. @@ -67,20 +67,18 @@ //! //! ### Implementations //! -//! The Balances module provides implementations for the following traits. If these traits provide the functionality -//! that you need, then you can avoid coupling with the Balances module. +//! The Balances pallet provides implementations for the following traits. If these traits provide the functionality +//! that you need, then you can avoid coupling with the Balances pallet. //! -//! - [`Currency`](../frame_support/traits/trait.Currency.html): Functions for dealing with a +//! - [`Currency`](frame_support::traits::Currency): Functions for dealing with a //! fungible assets system. -//! - [`ReservableCurrency`](../frame_support/traits/trait.ReservableCurrency.html): +//! - [`ReservableCurrency`](frame_support::traits::ReservableCurrency): //! Functions for dealing with assets that can be reserved from an account. -//! - [`LockableCurrency`](../frame_support/traits/trait.LockableCurrency.html): Functions for +//! - [`LockableCurrency`](frame_support::traits::LockableCurrency): Functions for //! dealing with accounts that allow liquidity restrictions. -//! - [`Imbalance`](../frame_support/traits/trait.Imbalance.html): Functions for handling +//! - [`Imbalance`](frame_support::traits::Imbalance): Functions for handling //! imbalances between total issuance in the system and account balances. Must be used when a function //! creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee). -//! - [`IsDeadAccount`](../frame_system/trait.IsDeadAccount.html): Determiner to say whether a -//! given account is unused. //! //! ## Interface //! @@ -91,40 +89,40 @@ //! //! ## Usage //! -//! The following examples show how to use the Balances module in your custom module. +//! The following examples show how to use the Balances pallet in your custom pallet. //! //! ### Examples from the FRAME //! -//! The Contract module uses the `Currency` trait to handle gas payment, and its types inherit from `Currency`: +//! The Contract pallet uses the `Currency` trait to handle gas payment, and its types inherit from `Currency`: //! //! ``` //! use frame_support::traits::Currency; -//! # pub trait Trait: frame_system::Trait { +//! # pub trait Config: frame_system::Config { //! # type Currency: Currency; //! # } //! -//! pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -//! pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +//! pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +//! pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; //! //! # fn main() {} //! ``` //! -//! The Staking module uses the `LockableCurrency` trait to lock a stash account's funds: +//! The Staking pallet uses the `LockableCurrency` trait to lock a stash account's funds: //! //! ``` //! use frame_support::traits::{WithdrawReasons, LockableCurrency}; //! use sp_runtime::traits::Bounded; -//! pub trait Trait: frame_system::Trait { +//! pub trait Config: frame_system::Config { //! type Currency: LockableCurrency; //! } -//! # struct StakingLedger { -//! # stash: ::AccountId, -//! # total: <::Currency as frame_support::traits::Currency<::AccountId>>::Balance, +//! 
# struct StakingLedger { +//! # stash: ::AccountId, +//! # total: <::Currency as frame_support::traits::Currency<::AccountId>>::Balance, //! # phantom: std::marker::PhantomData, //! # } //! # const STAKING_ID: [u8; 8] = *b"staking "; //! -//! fn update_ledger( +//! fn update_ledger( //! controller: &T::AccountId, //! ledger: &StakingLedger //! ) { @@ -141,11 +139,11 @@ //! //! ## Genesis config //! -//! The Balances module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). +//! The Balances pallet depends on the [`GenesisConfig`]. //! //! ## Assumptions //! -//! * Total issued balanced of all accounts should be less than `Trait::Balance::max_value()`. +//! * Total issued balanced of all accounts should be less than `Config::Balance::max_value()`. #![cfg_attr(not(feature = "std"), no_std)] @@ -157,109 +155,247 @@ mod benchmarking; pub mod weights; use sp_std::prelude::*; -use sp_std::{cmp, result, mem, fmt::Debug, ops::BitOr, convert::Infallible}; +use sp_std::{cmp, result, mem, fmt::Debug, ops::BitOr}; use codec::{Codec, Encode, Decode}; use frame_support::{ - StorageValue, Parameter, decl_event, decl_storage, decl_module, decl_error, ensure, + ensure, traits::{ - Currency, OnKilledAccount, OnUnbalanced, TryDrop, StoredMap, + Currency, OnUnbalanced, TryDrop, StoredMap, WithdrawReasons, LockIdentifier, LockableCurrency, ExistenceRequirement, Imbalance, SignedImbalance, ReservableCurrency, Get, ExistenceRequirement::KeepAlive, - ExistenceRequirement::AllowDeath, IsDeadAccount, BalanceStatus as Status, + ExistenceRequirement::AllowDeath, BalanceStatus as Status, } }; +#[cfg(feature = "std")] +use frame_support::traits::GenesisBuild; use sp_runtime::{ RuntimeDebug, DispatchResult, DispatchError, traits::{ - Zero, AtLeast32BitUnsigned, StaticLookup, Member, CheckedAdd, CheckedSub, - MaybeSerializeDeserialize, Saturating, Bounded, + Zero, AtLeast32BitUnsigned, StaticLookup, CheckedAdd, CheckedSub, + MaybeSerializeDeserialize, Saturating, Bounded, StoredMapError, }, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system as system; pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; pub use weights::WeightInfo; -pub trait Subtrait: frame_system::Trait { - /// The balance of an account. - type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + - MaybeSerializeDeserialize + Debug; +pub use pallet::*; - /// The minimum amount required to keep an account open. - type ExistentialDeposit: Get; +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; - /// The means of storing the balances of an account. - type AccountStore: StoredMap>; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The balance of an account. + type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + + MaybeSerializeDeserialize + Debug; - /// Weight information for the extrinsics in this pallet. - type WeightInfo: WeightInfo; + /// Handler for the unbalanced reduction when removing a dust account. + type DustRemoval: OnUnbalanced>; - /// The maximum number of locks that should exist on an account. - /// Not strictly enforced, but used for weight estimation. - type MaxLocks: Get; -} + /// The overarching event type. + type Event: From> + IsType<::Event>; -pub trait Trait: frame_system::Trait { - /// The balance of an account. 
- type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + - MaybeSerializeDeserialize + Debug; + /// The minimum amount required to keep an account open. + #[pallet::constant] + type ExistentialDeposit: Get; - /// Handler for the unbalanced reduction when removing a dust account. - type DustRemoval: OnUnbalanced>; + /// The means of storing the balances of an account. + type AccountStore: StoredMap>; - /// The overarching event type. - type Event: From> + Into<::Event>; + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; - /// The minimum amount required to keep an account open. - type ExistentialDeposit: Get; + /// The maximum number of locks that should exist on an account. + /// Not strictly enforced, but used for weight estimation. + type MaxLocks: Get; + } - /// The means of storing the balances of an account. - type AccountStore: StoredMap>; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData<(T, I)>); - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + } - /// The maximum number of locks that should exist on an account. - /// Not strictly enforced, but used for weight estimation. - type MaxLocks: Get; -} + #[pallet::call] + impl, I: 'static> Pallet { + /// Transfer some liquid free balance to another account. + /// + /// `transfer` will set the `FreeBalance` of the sender and receiver. + /// It will decrease the total issuance of the system by the `TransferFee`. + /// If the sender's account is below the existential deposit as a result + /// of the transfer, the account will be reaped. + /// + /// The dispatch origin for this call must be `Signed` by the transactor. + /// + /// # + /// - Dependent on arguments but not critical, given proper implementations for + /// input config types. See related functions below. + /// - It contains a limited number of reads and writes internally and no complex computation. + /// + /// Related functions: + /// + /// - `ensure_can_withdraw` is always called internally but has a bounded complexity. + /// - Transferring balances to accounts that did not exist before will cause + /// `T::OnNewAccount::on_new_account` to be called. + /// - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`. + /// - `transfer_keep_alive` works the same way as `transfer`, but has an additional + /// check that the transfer will not kill the origin account. + /// --------------------------------- + /// - Base Weight: 73.64 µs, worst case scenario (account created, account removed) + /// - DB Weight: 1 Read and 1 Write to destination account + /// - Origin account is already in memory, so no DB operations for them. + /// # + #[pallet::weight(T::WeightInfo::transfer())] + pub fn transfer( + origin: OriginFor, + dest: ::Source, + #[pallet::compact] value: T::Balance, + ) -> DispatchResultWithPostInfo { + let transactor = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?; + Ok(().into()) + } -impl, I: Instance> Subtrait for T { - type Balance = T::Balance; - type ExistentialDeposit = T::ExistentialDeposit; - type AccountStore = T::AccountStore; - type WeightInfo = >::WeightInfo; - type MaxLocks = T::MaxLocks; -} + /// Set the balances of a given account. + /// + /// This will alter `FreeBalance` and `ReservedBalance` in storage. 
it will + /// also decrease the total issuance of the system (`TotalIssuance`). + /// If the new free or reserved balance is below the existential deposit, + /// it will reset the account nonce (`frame_system::AccountNonce`). + /// + /// The dispatch origin for this call is `root`. + /// + /// # + /// - Independent of the arguments. + /// - Contains a limited number of reads and writes. + /// --------------------- + /// - Base Weight: + /// - Creating: 27.56 µs + /// - Killing: 35.11 µs + /// - DB Weight: 1 Read, 1 Write to `who` + /// # + #[pallet::weight( + T::WeightInfo::set_balance_creating() // Creates a new account. + .max(T::WeightInfo::set_balance_killing()) // Kills an existing account. + )] + pub(super) fn set_balance( + origin: OriginFor, + who: ::Source, + #[pallet::compact] new_free: T::Balance, + #[pallet::compact] new_reserved: T::Balance, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let who = T::Lookup::lookup(who)?; + let existential_deposit = T::ExistentialDeposit::get(); -decl_event!( - pub enum Event where - ::AccountId, - >::Balance - { + let wipeout = new_free + new_reserved < existential_deposit; + let new_free = if wipeout { Zero::zero() } else { new_free }; + let new_reserved = if wipeout { Zero::zero() } else { new_reserved }; + + let (free, reserved) = Self::mutate_account(&who, |account| { + if new_free > account.free { + mem::drop(PositiveImbalance::::new(new_free - account.free)); + } else if new_free < account.free { + mem::drop(NegativeImbalance::::new(account.free - new_free)); + } + + if new_reserved > account.reserved { + mem::drop(PositiveImbalance::::new(new_reserved - account.reserved)); + } else if new_reserved < account.reserved { + mem::drop(NegativeImbalance::::new(account.reserved - new_reserved)); + } + + account.free = new_free; + account.reserved = new_reserved; + + (account.free, account.reserved) + })?; + Self::deposit_event(Event::BalanceSet(who, free, reserved)); + Ok(().into()) + } + + /// Exactly as `transfer`, except the origin must be root and the source account may be + /// specified. + /// # + /// - Same as transfer, but additional read and write because the source account is + /// not assumed to be in the overlay. + /// # + #[pallet::weight(T::WeightInfo::force_transfer())] + pub fn force_transfer( + origin: OriginFor, + source: ::Source, + dest: ::Source, + #[pallet::compact] value: T::Balance, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let source = T::Lookup::lookup(source)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?; + Ok(().into()) + } + + /// Same as the [`transfer`] call, but with a check that the transfer will not kill the + /// origin account. + /// + /// 99% of the time you want [`transfer`] instead. + /// + /// [`transfer`]: struct.Pallet.html#method.transfer + /// # + /// - Cheaper than transfer because account cannot be killed. 
+ /// - Base Weight: 51.4 µs + /// - DB Weight: 1 Read and 1 Write to dest (sender is in overlay already) + /// # + #[pallet::weight(T::WeightInfo::transfer_keep_alive())] + pub fn transfer_keep_alive( + origin: OriginFor, + dest: ::Source, + #[pallet::compact] value: T::Balance, + ) -> DispatchResultWithPostInfo { + let transactor = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&transactor, &dest, value, KeepAlive)?; + Ok(().into()) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId", T::Balance = "Balance")] + pub enum Event, I: 'static = ()> { /// An account was created with some free balance. \[account, free_balance\] - Endowed(AccountId, Balance), + Endowed(T::AccountId, T::Balance), /// An account was removed whose balance was non-zero but below ExistentialDeposit, /// resulting in an outright loss. \[account, balance\] - DustLost(AccountId, Balance), + DustLost(T::AccountId, T::Balance), /// Transfer succeeded. \[from, to, value\] - Transfer(AccountId, AccountId, Balance), + Transfer(T::AccountId, T::AccountId, T::Balance), /// A balance was set by root. \[who, free, reserved\] - BalanceSet(AccountId, Balance, Balance), + BalanceSet(T::AccountId, T::Balance, T::Balance), /// Some amount was deposited (e.g. for transaction fees). \[who, deposit\] - Deposit(AccountId, Balance), + Deposit(T::AccountId, T::Balance), /// Some balance was reserved (moved from free to reserved). \[who, value\] - Reserved(AccountId, Balance), + Reserved(T::AccountId, T::Balance), /// Some balance was unreserved (moved from reserved to free). \[who, value\] - Unreserved(AccountId, Balance), + Unreserved(T::AccountId, T::Balance), /// Some balance was moved from the reserve of the first account to the second account. /// Final argument indicates the destination balance type. /// \[from, to, balance, destination_status\] - ReserveRepatriated(AccountId, AccountId, Balance, Status), + ReserveRepatriated(T::AccountId, T::AccountId, T::Balance, Status), } -); -decl_error! { - pub enum Error for Module, I: Instance> { + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + #[pallet::error] + pub enum Error { /// Vesting balance too high to send value VestingBalance, /// Account liquidity restrictions prevent withdrawal @@ -277,6 +413,107 @@ decl_error! { /// Beneficiary account must pre-exist DeadAccount, } + + /// The total units issued in the system. + #[pallet::storage] + #[pallet::getter(fn total_issuance)] + pub type TotalIssuance, I: 'static = ()> = StorageValue<_, T::Balance, ValueQuery>; + + /// The balance of an account. + /// + /// NOTE: This is only used in the case that this pallet is used to store balances. + #[pallet::storage] + pub type Account, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + AccountData, + ValueQuery + >; + + /// Any liquidity locks on some account balances. + /// NOTE: Should only be accessed when setting, changing and freeing a lock. + #[pallet::storage] + #[pallet::getter(fn locks)] + pub type Locks, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + Vec>, + ValueQuery + >; + + /// Storage version of the pallet. + /// + /// This is set to v2.0.0 for new networks. 
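The `set_balance` call earlier in this hunk zeroes both the free and reserved balances when their requested sum would fall below the existential deposit, so root cannot leave an account in the forbidden dust range. A standalone sketch of that wipe-out rule on plain `u128` balances (`apply_wipeout` is an illustrative helper, not the pallet's API; the pallet adds the two `T::Balance` values directly, a saturating add is used here for safety):

```rust
/// Sketch of the `set_balance` wipe-out rule: if free + reserved would sit below the
/// existential deposit, both are reset to zero and the account is effectively dust.
fn apply_wipeout(new_free: u128, new_reserved: u128, existential_deposit: u128) -> (u128, u128) {
    let wipeout = new_free.saturating_add(new_reserved) < existential_deposit;
    if wipeout { (0, 0) } else { (new_free, new_reserved) }
}

fn main() {
    assert_eq!(apply_wipeout(3, 4, 10), (0, 0)); // below the ED: zeroed out
    assert_eq!(apply_wipeout(7, 4, 10), (7, 4)); // at or above the ED: kept as requested
}
```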
+ #[pallet::storage] + pub(super) type StorageVersion, I: 'static = ()> = StorageValue< + _, + Releases, + ValueQuery + >; + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + pub balances: Vec<(T::AccountId, T::Balance)>, + } + + #[cfg(feature = "std")] + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + Self { + balances: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + let total = self.balances + .iter() + .fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n); + >::put(total); + + >::put(Releases::V2_0_0); + + for (_, balance) in &self.balances { + assert!( + *balance >= >::ExistentialDeposit::get(), + "the balance of any account should always be at least the existential deposit.", + ) + } + + // ensure no duplicates exist. + let endowed_accounts = self.balances.iter().map(|(x, _)| x).cloned().collect::>(); + + assert!(endowed_accounts.len() == self.balances.len(), "duplicate balances in genesis."); + + for &(ref who, free) in self.balances.iter() { + assert!(T::AccountStore::insert(who, AccountData { free, ..Default::default() }).is_ok()); + } + } + } +} + +#[cfg(feature = "std")] +impl, I: 'static> GenesisConfig { + /// Direct implementation of `GenesisBuild::build_storage`. + /// + /// Kept in order not to break dependency. + pub fn build_storage(&self) -> Result { + >::build_storage(self) + } + + /// Direct implementation of `GenesisBuild::assimilate_storage`. + /// + /// Kept in order not to break dependency. + pub fn assimilate_storage( + &self, + storage: &mut sp_runtime::Storage + ) -> Result<(), String> { + >::assimilate_storage(self, storage) + } } /// Simplified reasons for withdrawing balance. @@ -381,193 +618,7 @@ impl Default for Releases { } } -decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Balances { - /// The total units issued in the system. - pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig| { - config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n) - }): T::Balance; - - /// The balance of an account. - /// - /// NOTE: This is only used in the case that this module is used to store balances. - pub Account: map hasher(blake2_128_concat) T::AccountId => AccountData; - - /// Any liquidity locks on some account balances. - /// NOTE: Should only be accessed when setting, changing and freeing a lock. - pub Locks get(fn locks): map hasher(blake2_128_concat) T::AccountId => Vec>; - - /// Storage version of the pallet. - /// - /// This is set to v2.0.0 for new networks. - StorageVersion build(|_: &GenesisConfig| Releases::V2_0_0): Releases; - } - add_extra_genesis { - config(balances): Vec<(T::AccountId, T::Balance)>; - // ^^ begin, length, amount liquid at genesis - build(|config: &GenesisConfig| { - for (_, balance) in &config.balances { - assert!( - *balance >= >::ExistentialDeposit::get(), - "the balance of any account should always be more than existential deposit.", - ) - } - for &(ref who, free) in config.balances.iter() { - T::AccountStore::insert(who, AccountData { free, .. Default::default() }); - } - }); - } -} - -decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { - type Error = Error; - - /// The minimum amount required to keep an account open. 
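The `genesis_build` implementation above does three things: it folds all endowed balances into `TotalIssuance`, rejects any endowment below the existential deposit, and rejects duplicate accounts (the truncated `collect::<..>` in the diff gathers the account ids into a set, presumably a `BTreeSet`, for that check). A standalone sketch of those checks over plain tuples (`build_genesis` is an illustrative helper, not the pallet's API):

```rust
use std::collections::BTreeSet;

/// Sketch of the genesis-config sanity checks; returns the total issuance if valid.
fn build_genesis(balances: &[(u64, u128)], existential_deposit: u128) -> u128 {
    // Every endowed account must hold at least the existential deposit.
    assert!(
        balances.iter().all(|&(_, b)| b >= existential_deposit),
        "the balance of any account should always be at least the existential deposit."
    );

    // No account may appear twice in the genesis balances.
    let unique: BTreeSet<_> = balances.iter().map(|(who, _)| who).collect();
    assert!(unique.len() == balances.len(), "duplicate balances in genesis.");

    // Total issuance is the sum of all endowments.
    balances.iter().map(|&(_, b)| b).sum()
}

fn main() {
    assert_eq!(build_genesis(&[(0, 10_000_000), (1, 10_000_000)], 1), 20_000_000);
}
```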
- const ExistentialDeposit: T::Balance = T::ExistentialDeposit::get(); - - fn deposit_event() = default; - - /// Transfer some liquid free balance to another account. - /// - /// `transfer` will set the `FreeBalance` of the sender and receiver. - /// It will decrease the total issuance of the system by the `TransferFee`. - /// If the sender's account is below the existential deposit as a result - /// of the transfer, the account will be reaped. - /// - /// The dispatch origin for this call must be `Signed` by the transactor. - /// - /// # - /// - Dependent on arguments but not critical, given proper implementations for - /// input config types. See related functions below. - /// - It contains a limited number of reads and writes internally and no complex computation. - /// - /// Related functions: - /// - /// - `ensure_can_withdraw` is always called internally but has a bounded complexity. - /// - Transferring balances to accounts that did not exist before will cause - /// `T::OnNewAccount::on_new_account` to be called. - /// - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`. - /// - `transfer_keep_alive` works the same way as `transfer`, but has an additional - /// check that the transfer will not kill the origin account. - /// --------------------------------- - /// - Base Weight: 73.64 µs, worst case scenario (account created, account removed) - /// - DB Weight: 1 Read and 1 Write to destination account - /// - Origin account is already in memory, so no DB operations for them. - /// # - #[weight = T::WeightInfo::transfer()] - pub fn transfer( - origin, - dest: ::Source, - #[compact] value: T::Balance - ) { - let transactor = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?; - } - - /// Set the balances of a given account. - /// - /// This will alter `FreeBalance` and `ReservedBalance` in storage. it will - /// also decrease the total issuance of the system (`TotalIssuance`). - /// If the new free or reserved balance is below the existential deposit, - /// it will reset the account nonce (`frame_system::AccountNonce`). - /// - /// The dispatch origin for this call is `root`. - /// - /// # - /// - Independent of the arguments. - /// - Contains a limited number of reads and writes. - /// --------------------- - /// - Base Weight: - /// - Creating: 27.56 µs - /// - Killing: 35.11 µs - /// - DB Weight: 1 Read, 1 Write to `who` - /// # - #[weight = T::WeightInfo::set_balance_creating() // Creates a new account. - .max(T::WeightInfo::set_balance_killing()) // Kills an existing account. 
- ] - fn set_balance( - origin, - who: ::Source, - #[compact] new_free: T::Balance, - #[compact] new_reserved: T::Balance - ) { - ensure_root(origin)?; - let who = T::Lookup::lookup(who)?; - let existential_deposit = T::ExistentialDeposit::get(); - - let wipeout = new_free + new_reserved < existential_deposit; - let new_free = if wipeout { Zero::zero() } else { new_free }; - let new_reserved = if wipeout { Zero::zero() } else { new_reserved }; - - let (free, reserved) = Self::mutate_account(&who, |account| { - if new_free > account.free { - mem::drop(PositiveImbalance::::new(new_free - account.free)); - } else if new_free < account.free { - mem::drop(NegativeImbalance::::new(account.free - new_free)); - } - - if new_reserved > account.reserved { - mem::drop(PositiveImbalance::::new(new_reserved - account.reserved)); - } else if new_reserved < account.reserved { - mem::drop(NegativeImbalance::::new(account.reserved - new_reserved)); - } - - account.free = new_free; - account.reserved = new_reserved; - - (account.free, account.reserved) - }); - Self::deposit_event(RawEvent::BalanceSet(who, free, reserved)); - } - - /// Exactly as `transfer`, except the origin must be root and the source account may be - /// specified. - /// # - /// - Same as transfer, but additional read and write because the source account is - /// not assumed to be in the overlay. - /// # - #[weight = T::WeightInfo::force_transfer()] - pub fn force_transfer( - origin, - source: ::Source, - dest: ::Source, - #[compact] value: T::Balance - ) { - ensure_root(origin)?; - let source = T::Lookup::lookup(source)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?; - } - - /// Same as the [`transfer`] call, but with a check that the transfer will not kill the - /// origin account. - /// - /// 99% of the time you want [`transfer`] instead. - /// - /// [`transfer`]: struct.Module.html#method.transfer - /// # - /// - Cheaper than transfer because account cannot be killed. - /// - Base Weight: 51.4 µs - /// - DB Weight: 1 Read and 1 Write to dest (sender is in overlay already) - /// # - #[weight = T::WeightInfo::transfer_keep_alive()] - pub fn transfer_keep_alive( - origin, - dest: ::Source, - #[compact] value: T::Balance - ) { - let transactor = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer(&transactor, &dest, value, KeepAlive)?; - } - } -} - -impl, I: Instance> Module { - // PRIVATE MUTABLES - +impl, I: 'static> Pallet { /// Get the free balance of an account. pub fn free_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { Self::account(who.borrow()).free @@ -609,7 +660,7 @@ impl, I: Instance> Module { if total < T::ExistentialDeposit::get() { if !total.is_zero() { T::DustRemoval::on_unbalanced(NegativeImbalance::new(total)); - Self::deposit_event(RawEvent::DustLost(who.clone(), total)); + Self::deposit_event(Event::DustLost(who.clone(), total)); } None } else { @@ -628,9 +679,8 @@ impl, I: Instance> Module { pub fn mutate_account( who: &T::AccountId, f: impl FnOnce(&mut AccountData) -> R - ) -> R { - Self::try_mutate_account(who, |a, _| -> Result { Ok(f(a)) }) - .expect("Error is infallible; qed") + ) -> Result { + Self::try_mutate_account(who, |a, _| -> Result { Ok(f(a)) }) } /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce @@ -642,7 +692,7 @@ impl, I: Instance> Module { /// /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that /// the caller will do this. 
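// A minimal plain-Rust model of the "wipeout" rule in `set_balance` above: if
// the requested free + reserved would fall below the existential deposit, both
// components are reset to zero before the account is mutated. Function name
// and integer types are illustrative only.
fn apply_wipeout(new_free: u128, new_reserved: u128, ed: u128) -> (u128, u128) {
    if new_free.saturating_add(new_reserved) < ed {
        (0, 0)
    } else {
        (new_free, new_reserved)
    }
}
// e.g. with ed = 100: apply_wipeout(40, 50, 100) == (0, 0)
//                     apply_wipeout(60, 50, 100) == (60, 50)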
- fn try_mutate_account( + fn try_mutate_account>( who: &T::AccountId, f: impl FnOnce(&mut AccountData, bool) -> Result ) -> Result { @@ -656,7 +706,7 @@ impl, I: Instance> Module { }) }).map(|(maybe_endowed, result)| { if let Some(endowed) = maybe_endowed { - Self::deposit_event(RawEvent::Endowed(who.clone(), endowed)); + Self::deposit_event(Event::Endowed(who.clone(), endowed)); } result }) @@ -670,7 +720,8 @@ impl, I: Instance> Module { A runtime configuration adjustment may be needed." ); } - Self::mutate_account(who, |b| { + // No way this can fail since we do not alter the existential balances. + let _ = Self::mutate_account(who, |b| { b.misc_frozen = Zero::zero(); b.fee_frozen = Zero::zero(); for l in locks.iter() { @@ -689,12 +740,20 @@ impl, I: Instance> Module { if existed { // TODO: use Locks::::hashed_key // https://github.com/paritytech/substrate/issues/4969 - system::Module::::dec_ref(who); + system::Pallet::::dec_consumers(who); } } else { Locks::::insert(who, locks); if !existed { - system::Module::::inc_ref(who); + if system::Pallet::::inc_consumers(who).is_err() { + // No providers for the locks. This is impossible under normal circumstances + // since the funds that are under the lock will themselves be stored in the + // account and therefore will need a reference. + frame_support::debug::warn!( + "Warning: Attempt to introduce lock consumer reference, yet no providers. \ + This is unexpected but should be safe." + ); + } } } } @@ -704,17 +763,18 @@ impl, I: Instance> Module { // of the inner member. mod imbalances { use super::{ - result, Subtrait, DefaultInstance, Imbalance, Trait, Zero, Instance, Saturating, - StorageValue, TryDrop, + result, Imbalance, Config, Zero, Saturating, + TryDrop, RuntimeDebug, }; use sp_std::mem; /// Opaque, move-only struct with private fields that serves as a token denoting that /// funds have been created without any equal and opposite accounting. #[must_use] - pub struct PositiveImbalance, I: Instance=DefaultInstance>(T::Balance); + #[derive(RuntimeDebug, PartialEq, Eq)] + pub struct PositiveImbalance, I: 'static>(T::Balance); - impl, I: Instance> PositiveImbalance { + impl, I: 'static> PositiveImbalance { /// Create a new positive imbalance from a balance. pub fn new(amount: T::Balance) -> Self { PositiveImbalance(amount) @@ -724,22 +784,23 @@ mod imbalances { /// Opaque, move-only struct with private fields that serves as a token denoting that /// funds have been destroyed without any equal and opposite accounting. #[must_use] - pub struct NegativeImbalance, I: Instance=DefaultInstance>(T::Balance); + #[derive(RuntimeDebug, PartialEq, Eq)] + pub struct NegativeImbalance, I: 'static>(T::Balance); - impl, I: Instance> NegativeImbalance { + impl, I: 'static> NegativeImbalance { /// Create a new negative imbalance from a balance. 
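// A minimal plain-Rust model of the bookkeeping in `update_locks` above: an
// account that holds any locks keeps one "consumer" reference on its system
// account record so it cannot be reaped, and that reference is released once
// the last lock is removed. Names here are illustrative, not frame_system API.
fn sync_lock_consumer(consumers: &mut u32, had_locks: bool, has_locks: bool) {
    match (had_locks, has_locks) {
        (false, true) => *consumers += 1,                           // first lock added
        (true, false) => *consumers = consumers.saturating_sub(1),  // last lock removed
        _ => {}                                                     // count unchanged
    }
}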
pub fn new(amount: T::Balance) -> Self { NegativeImbalance(amount) } } - impl, I: Instance> TryDrop for PositiveImbalance { + impl, I: 'static> TryDrop for PositiveImbalance { fn try_drop(self) -> result::Result<(), Self> { self.drop_zero() } } - impl, I: Instance> Imbalance for PositiveImbalance { + impl, I: 'static> Imbalance for PositiveImbalance { type Opposite = NegativeImbalance; fn zero() -> Self { @@ -784,13 +845,13 @@ mod imbalances { } } - impl, I: Instance> TryDrop for NegativeImbalance { + impl, I: 'static> TryDrop for NegativeImbalance { fn try_drop(self) -> result::Result<(), Self> { self.drop_zero() } } - impl, I: Instance> Imbalance for NegativeImbalance { + impl, I: 'static> Imbalance for NegativeImbalance { type Opposite = PositiveImbalance; fn zero() -> Self { @@ -835,82 +896,26 @@ mod imbalances { } } - impl, I: Instance> Drop for PositiveImbalance { + impl, I: 'static> Drop for PositiveImbalance { /// Basic drop handler will just square up the total issuance. fn drop(&mut self) { - , I>>::mutate( + >::mutate( |v| *v = v.saturating_add(self.0) ); } } - impl, I: Instance> Drop for NegativeImbalance { + impl, I: 'static> Drop for NegativeImbalance { /// Basic drop handler will just square up the total issuance. fn drop(&mut self) { - , I>>::mutate( + >::mutate( |v| *v = v.saturating_sub(self.0) ); } } } -// TODO: #2052 -// Somewhat ugly hack in order to gain access to module's `increase_total_issuance_by` -// using only the Subtrait (which defines only the types that are not dependent -// on Positive/NegativeImbalance). Subtrait must be used otherwise we end up with a -// circular dependency with Trait having some types be dependent on PositiveImbalance -// and PositiveImbalance itself depending back on Trait for its Drop impl (and thus -// its type declaration). -// This works as long as `increase_total_issuance_by` doesn't use the Imbalance -// types (basically for charging fees). -// This should eventually be refactored so that the type item that -// depends on the Imbalance type (DustRemoval) is placed in its own pallet. 
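// A minimal plain-Rust sketch of the move-only "imbalance token" pattern used
// by PositiveImbalance/NegativeImbalance above: the token must be consumed or
// offset, and whatever is left when it is dropped is squared up against a
// running total (a static counter here stands in for TotalIssuance).
use std::sync::atomic::{AtomicU64, Ordering};

static TOTAL_ISSUANCE_MODEL: AtomicU64 = AtomicU64::new(0);

#[must_use]
struct PositiveImbalanceModel(u64);

impl Drop for PositiveImbalanceModel {
    fn drop(&mut self) {
        // Any un-offset amount is added to the total when the token goes out of scope.
        TOTAL_ISSUANCE_MODEL.fetch_add(self.0, Ordering::Relaxed);
    }
}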
-struct ElevatedTrait, I: Instance>(T, I); -impl, I: Instance> Clone for ElevatedTrait { - fn clone(&self) -> Self { unimplemented!() } -} -impl, I: Instance> PartialEq for ElevatedTrait { - fn eq(&self, _: &Self) -> bool { unimplemented!() } -} -impl, I: Instance> Eq for ElevatedTrait {} -impl, I: Instance> frame_system::Trait for ElevatedTrait { - type BaseCallFilter = T::BaseCallFilter; - type Origin = T::Origin; - type Call = T::Call; - type Index = T::Index; - type BlockNumber = T::BlockNumber; - type Hash = T::Hash; - type Hashing = T::Hashing; - type AccountId = T::AccountId; - type Lookup = T::Lookup; - type Header = T::Header; - type Event = (); - type BlockHashCount = T::BlockHashCount; - type MaximumBlockWeight = T::MaximumBlockWeight; - type DbWeight = T::DbWeight; - type BlockExecutionWeight = T::BlockExecutionWeight; - type ExtrinsicBaseWeight = T::ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = T::MaximumBlockWeight; - type MaximumBlockLength = T::MaximumBlockLength; - type AvailableBlockRatio = T::AvailableBlockRatio; - type Version = T::Version; - type PalletInfo = T::PalletInfo; - type OnNewAccount = T::OnNewAccount; - type OnKilledAccount = T::OnKilledAccount; - type AccountData = T::AccountData; - type SystemWeightInfo = T::SystemWeightInfo; -} -impl, I: Instance> Trait for ElevatedTrait { - type Balance = T::Balance; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = T::ExistentialDeposit; - type AccountStore = T::AccountStore; - type WeightInfo = >::WeightInfo; - type MaxLocks = T::MaxLocks; -} - -impl, I: Instance> Currency for Module where +impl, I: 'static> Currency for Pallet where T::Balance: MaybeSerializeDeserialize + Debug { type Balance = T::Balance; @@ -972,7 +977,7 @@ impl, I: Instance> Currency for Module where // // # // Despite iterating over a list of locks, they are limited by the number of - // lock IDs, which means the number of runtime modules that intend to use and create locks. + // lock IDs, which means the number of runtime pallets that intend to use and create locks. // # fn ensure_can_withdraw( who: &T::AccountId, @@ -1013,10 +1018,12 @@ impl, I: Instance> Currency for Module where value, WithdrawReasons::TRANSFER, from_account.free, - )?; + ).map_err(|_| Error::::LiquidityRestrictions)?; + // TODO: This is over-conservative. There may now be other providers, and this pallet + // may not even be a provider. let allow_death = existence_requirement == ExistenceRequirement::AllowDeath; - let allow_death = allow_death && system::Module::::allow_death(transactor); + let allow_death = allow_death && !system::Pallet::::is_provider_required(transactor); ensure!(allow_death || from_account.free >= ed, Error::::KeepAlive); Ok(()) @@ -1024,7 +1031,7 @@ impl, I: Instance> Currency for Module where })?; // Emit transfer event. - Self::deposit_event(RawEvent::Transfer(transactor.clone(), dest.clone(), value)); + Self::deposit_event(Event::Transfer(transactor.clone(), dest.clone(), value)); Ok(()) } @@ -1032,7 +1039,7 @@ impl, I: Instance> Currency for Module where /// Slash a target account `who`, returning the negative imbalance created and any left over /// amount that could not be slashed. /// - /// Is a no-op if `value` to be slashed is zero. + /// Is a no-op if `value` to be slashed is zero or the account does not exist. /// /// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn /// from in extreme circumstances. 
`can_slash()` should be used prior to `slash()` to avoid having @@ -1043,20 +1050,48 @@ impl, I: Instance> Currency for Module where value: Self::Balance ) -> (Self::NegativeImbalance, Self::Balance) { if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } - - Self::mutate_account(who, |account| { - let free_slash = cmp::min(account.free, value); - account.free -= free_slash; - - let remaining_slash = value - free_slash; - if !remaining_slash.is_zero() { - let reserved_slash = cmp::min(account.reserved, remaining_slash); - account.reserved -= reserved_slash; - (NegativeImbalance::new(free_slash + reserved_slash), remaining_slash - reserved_slash) - } else { - (NegativeImbalance::new(value), Zero::zero()) + if Self::total_balance(&who).is_zero() { return (NegativeImbalance::zero(), value) } + + for attempt in 0..2 { + match Self::try_mutate_account(who, + |account, _is_new| -> Result<(Self::NegativeImbalance, Self::Balance), StoredMapError> { + // Best value is the most amount we can slash following liveness rules. + let best_value = match attempt { + // First attempt we try to slash the full amount, and see if liveness issues happen. + 0 => value, + // If acting as a critical provider (i.e. first attempt failed), then slash + // as much as possible while leaving at least at ED. + _ => value.min((account.free + account.reserved).saturating_sub(T::ExistentialDeposit::get())), + }; + + let free_slash = cmp::min(account.free, best_value); + account.free -= free_slash; // Safe because of above check + let remaining_slash = best_value - free_slash; // Safe because of above check + + if !remaining_slash.is_zero() { + // If we have remaining slash, take it from reserved balance. + let reserved_slash = cmp::min(account.reserved, remaining_slash); + account.reserved -= reserved_slash; // Safe because of above check + Ok(( + NegativeImbalance::new(free_slash + reserved_slash), + value - free_slash - reserved_slash, // Safe because value is gt or eq total slashed + )) + } else { + // Else we are done! + Ok(( + NegativeImbalance::new(free_slash), + value - free_slash, // Safe because value is gt or eq to total slashed + )) + } + } + ) { + Ok(r) => return r, + Err(_) => (), } - }) + } + + // Should never get here. But we'll be defensive anyway. + (Self::NegativeImbalance::zero(), value) } /// Deposit some `value` into the free balance of an existing target account `who`. @@ -1079,7 +1114,8 @@ impl, I: Instance> Currency for Module where /// /// This function is a no-op if: /// - the `value` to be deposited is zero; or - /// - if the `value` to be deposited is less than the ED and the account does not yet exist; or + /// - the `value` to be deposited is less than the required ED and the account does not yet exist; or + /// - the deposit would necessitate the account to exist and there are no provider references; or /// - `value` is so large it would cause the balance of `who` to overflow. fn deposit_creating( who: &T::AccountId, @@ -1087,17 +1123,22 @@ impl, I: Instance> Currency for Module where ) -> Self::PositiveImbalance { if value.is_zero() { return Self::PositiveImbalance::zero() } - Self::try_mutate_account(who, |account, is_new| -> Result { - // bail if not yet created and this operation wouldn't be enough to create it. 
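// A minimal plain-Rust model of the two-attempt rule introduced in `slash`
// above: attempt 0 tries the full requested amount; if that mutation is
// rejected because the account must stay alive, attempt 1 retries with at most
// total - ED so the account survives. Only the "best value" selection is
// modelled; the account mutation itself is elided.
fn best_slash_value(attempt: u32, requested: u128, free: u128, reserved: u128, ed: u128) -> u128 {
    match attempt {
        0 => requested,
        _ => requested.min((free + reserved).saturating_sub(ed)),
    }
}
// e.g. free = 1_000, reserved = 0, ed = 100, requested = 1_300:
//   attempt 0 -> tries 1_300 (fails if the account has a consumer reference),
//   attempt 1 -> capped at 900, leaving exactly the existential deposit.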
+ let r = Self::try_mutate_account(who, |account, is_new| -> Result { + let ed = T::ExistentialDeposit::get(); - ensure!(value >= ed || !is_new, Self::PositiveImbalance::zero()); + ensure!(value >= ed || !is_new, Error::::ExistentialDeposit); // defensive only: overflow should never happen, however in case it does, then this // operation is a no-op. - account.free = account.free.checked_add(&value).ok_or_else(|| Self::PositiveImbalance::zero())?; + account.free = match account.free.checked_add(&value) { + Some(x) => x, + None => return Ok(Self::PositiveImbalance::zero()), + }; Ok(PositiveImbalance::new(value)) - }).unwrap_or_else(|x| x) + }).unwrap_or_else(|_| Self::PositiveImbalance::zero()); + + r } /// Withdraw some free balance from an account, respecting existence requirements. @@ -1136,9 +1177,10 @@ impl, I: Instance> Currency for Module where -> SignedImbalance { Self::try_mutate_account(who, |account, is_new| - -> Result, ()> + -> Result, DispatchError> { let ed = T::ExistentialDeposit::get(); + let total = value.saturating_add(account.reserved); // If we're attempting to set an existing account to less than ED, then // bypass the entire operation. It's a no-op if you follow it through, but // since this is an instance where we might account for a negative imbalance @@ -1146,7 +1188,7 @@ impl, I: Instance> Currency for Module where // equal and opposite cause (returned as an Imbalance), then in the // instance that there's no other accounts on the system at all, we might // underflow the issuance and our arithmetic will be off. - ensure!(value.saturating_add(account.reserved) >= ed || !is_new, ()); + ensure!(total >= ed || !is_new, Error::::ExistentialDeposit); let imbalance = if account.free <= value { SignedImbalance::Positive(PositiveImbalance::new(value - account.free)) @@ -1159,7 +1201,7 @@ impl, I: Instance> Currency for Module where } } -impl, I: Instance> ReservableCurrency for Module where +impl, I: 'static> ReservableCurrency for Pallet where T::Balance: MaybeSerializeDeserialize + Debug { /// Check if `who` can reserve `value` from their free balance. @@ -1190,45 +1232,74 @@ impl, I: Instance> ReservableCurrency for Module Self::ensure_can_withdraw(&who, value.clone(), WithdrawReasons::RESERVE, account.free) })?; - Self::deposit_event(RawEvent::Reserved(who.clone(), value)); + Self::deposit_event(Event::Reserved(who.clone(), value)); Ok(()) } /// Unreserve some funds, returning any amount that was unable to be unreserved. /// - /// Is a no-op if the value to be unreserved is zero. + /// Is a no-op if the value to be unreserved is zero or the account does not exist. fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { if value.is_zero() { return Zero::zero() } + if Self::total_balance(&who).is_zero() { return value } - let actual = Self::mutate_account(who, |account| { + let actual = match Self::mutate_account(who, |account| { let actual = cmp::min(account.reserved, value); account.reserved -= actual; // defensive only: this can never fail since total issuance which is at least free+reserved // fits into the same data type. account.free = account.free.saturating_add(actual); actual - }); + }) { + Ok(x) => x, + Err(_) => { + // This should never happen since we don't alter the total amount in the account. + // If it ever does, then we should fail gracefully though, indicating that nothing + // could be done. 
+ return value + } + }; - Self::deposit_event(RawEvent::Unreserved(who.clone(), actual.clone())); + Self::deposit_event(Event::Unreserved(who.clone(), actual.clone())); value - actual } /// Slash from reserved balance, returning the negative imbalance created, /// and any amount that was unable to be slashed. /// - /// Is a no-op if the value to be slashed is zero. + /// Is a no-op if the value to be slashed is zero or the account does not exist. fn slash_reserved( who: &T::AccountId, value: Self::Balance ) -> (Self::NegativeImbalance, Self::Balance) { if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } - - Self::mutate_account(who, |account| { - // underflow should never happen, but it if does, there's nothing to be done here. - let actual = cmp::min(account.reserved, value); - account.reserved -= actual; - (NegativeImbalance::new(actual), value - actual) - }) + if Self::total_balance(&who).is_zero() { return (NegativeImbalance::zero(), value) } + + // NOTE: `mutate_account` may fail if it attempts to reduce the balance to the point that an + // account is attempted to be illegally destroyed. + + for attempt in 0..2 { + match Self::mutate_account(who, |account| { + let best_value = match attempt { + 0 => value, + // If acting as a critical provider (i.e. first attempt failed), then ensure + // slash leaves at least the ED. + _ => value.min((account.free + account.reserved).saturating_sub(T::ExistentialDeposit::get())), + }; + + let actual = cmp::min(account.reserved, best_value); + account.reserved -= actual; + + // underflow should never happen, but it if does, there's nothing to be done here. + (NegativeImbalance::new(actual), value - actual) + }) { + Ok(r) => return r, + Err(_) => (), + } + } + // Should never get here as we ensure that ED is left in the second attempt. + // In case we do, though, then we fail gracefully. + (Self::NegativeImbalance::zero(), value) } /// Move the reserved balance of one account into the balance of another, according to `status`. @@ -1264,30 +1335,12 @@ impl, I: Instance> ReservableCurrency for Module }) })?; - Self::deposit_event(RawEvent::ReserveRepatriated(slashed.clone(), beneficiary.clone(), actual, status)); + Self::deposit_event(Event::ReserveRepatriated(slashed.clone(), beneficiary.clone(), actual, status)); Ok(value - actual) } } -/// Implement `OnKilledAccount` to remove the local account, if using local account storage. -/// -/// NOTE: You probably won't need to use this! This only needs to be "wired in" to System module -/// if you're using the local balance storage. 
**If you're using the composite system account -/// storage (which is the default in most examples and tests) then there's no need.** -impl, I: Instance> OnKilledAccount for Module { - fn on_killed_account(who: &T::AccountId) { - Account::::mutate_exists(who, |account| { - let total = account.as_ref().map(|acc| acc.total()).unwrap_or_default(); - if !total.is_zero() { - T::DustRemoval::on_unbalanced(NegativeImbalance::new(total)); - Self::deposit_event(RawEvent::DustLost(who.clone(), total)); - } - *account = None; - }); - } -} - -impl, I: Instance> LockableCurrency for Module +impl, I: 'static> LockableCurrency for Pallet where T::Balance: MaybeSerializeDeserialize + Debug { @@ -1351,12 +1404,3 @@ where Self::update_locks(who, &locks[..]); } } - -impl, I: Instance> IsDeadAccount for Module where - T::Balance: MaybeSerializeDeserialize + Debug -{ - fn is_dead_account(who: &T::AccountId) -> bool { - // this should always be exactly equivalent to `Self::account(who).total().is_zero()` if ExistentialDeposit > 0 - !T::AccountStore::is_explicit(who) - } -} diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index b8cf90dad9222..c860a0364d4bc 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,20 +19,6 @@ #![cfg(test)] -#[derive(Debug)] -pub struct CallWithDispatchInfo; -impl sp_runtime::traits::Dispatchable for CallWithDispatchInfo { - type Origin = (); - type Trait = (); - type Info = frame_support::weights::DispatchInfo; - type PostInfo = frame_support::weights::PostDispatchInfo; - - fn dispatch(self, _origin: Self::Origin) - -> sp_runtime::DispatchResultWithInfo { - panic!("Do not use dummy implementation for dispatch."); - } -} - #[macro_export] macro_rules! decl_tests { ($test:ty, $ext_builder:ty, $existential_deposit:expr) => { @@ -40,10 +26,10 @@ macro_rules! decl_tests { use crate::*; use sp_runtime::{FixedPointNumber, traits::{SignedExtension, BadOrigin}}; use frame_support::{ - assert_noop, assert_ok, assert_err, + assert_noop, assert_storage_noop, assert_ok, assert_err, StorageValue, traits::{ LockableCurrency, LockIdentifier, WithdrawReasons, - Currency, ReservableCurrency, ExistenceRequirement::AllowDeath, StoredMap + Currency, ReservableCurrency, ExistenceRequirement::AllowDeath } }; use pallet_transaction_payment::{ChargeTransactionPayment, Multiplier}; @@ -52,10 +38,8 @@ macro_rules! decl_tests { const ID_1: LockIdentifier = *b"1 "; const ID_2: LockIdentifier = *b"2 "; - pub type System = frame_system::Module<$test>; - pub type Balances = Module<$test>; - - pub const CALL: &<$test as frame_system::Trait>::Call = &$crate::tests::CallWithDispatchInfo; + pub const CALL: &<$test as frame_system::Config>::Call = + &Call::Balances(pallet_balances::Call::transfer(0, 0)); /// create a transaction info struct from weight. Handy to avoid building the whole struct. pub fn info_from_weight(w: Weight) -> DispatchInfo { @@ -91,7 +75,8 @@ macro_rules! decl_tests { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { assert_eq!(Balances::free_balance(1), 10); assert_ok!(>::transfer(&1, &2, 10, AllowDeath)); - assert!(!<::AccountStore as StoredMap>>::is_explicit(&1)); + // Check that the account is dead. 
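// A minimal sketch of the shape of the outer enums that `construct_runtime!`
// generates for the mock runtimes further down, which is why the tests now
// build the benchmark call as `Call::Balances(..)` and match events as
// `Event::pallet_balances(..)`. Payload types are simplified stand-ins, not
// the generated runtime types.
#[allow(non_camel_case_types)]
enum OuterEventModel {
    frame_system(SystemEventModel),
    pallet_balances(BalancesEventModel),
}
enum SystemEventModel { NewAccount(u64), KilledAccount(u64) }
enum BalancesEventModel { Endowed(u64, u64), Reserved(u64, u64) }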
+ assert!(!frame_system::Account::::contains_key(&1)); }); } @@ -262,14 +247,12 @@ macro_rules! decl_tests { .monied(true) .build() .execute_with(|| { - assert_eq!(Balances::is_dead_account(&5), true); // account 5 should not exist // ext_deposit is 10, value is 9, not satisfies for ext_deposit assert_noop!( Balances::transfer(Some(1).into(), 5, 9), Error::<$test, _>::ExistentialDeposit, ); - assert_eq!(Balances::is_dead_account(&5), true); // account 5 should not exist assert_eq!(Balances::free_balance(1), 100); }); } @@ -282,31 +265,25 @@ macro_rules! decl_tests { .build() .execute_with(|| { System::inc_account_nonce(&2); - assert_eq!(Balances::is_dead_account(&2), false); - assert_eq!(Balances::is_dead_account(&5), true); assert_eq!(Balances::total_balance(&2), 256 * 20); assert_ok!(Balances::reserve(&2, 256 * 19 + 1)); // account 2 becomes mostly reserved assert_eq!(Balances::free_balance(2), 255); // "free" account deleted." assert_eq!(Balances::total_balance(&2), 256 * 20); // reserve still exists. - assert_eq!(Balances::is_dead_account(&2), false); assert_eq!(System::account_nonce(&2), 1); // account 4 tries to take index 1 for account 5. assert_ok!(Balances::transfer(Some(4).into(), 5, 256 * 1 + 0x69)); assert_eq!(Balances::total_balance(&5), 256 * 1 + 0x69); - assert_eq!(Balances::is_dead_account(&5), false); assert!(Balances::slash(&2, 256 * 19 + 2).1.is_zero()); // account 2 gets slashed // "reserve" account reduced to 255 (below ED) so account deleted assert_eq!(Balances::total_balance(&2), 0); assert_eq!(System::account_nonce(&2), 0); // nonce zero - assert_eq!(Balances::is_dead_account(&2), true); // account 4 tries to take index 1 again for account 6. assert_ok!(Balances::transfer(Some(4).into(), 6, 256 * 1 + 0x69)); assert_eq!(Balances::total_balance(&6), 256 * 1 + 0x69); - assert_eq!(Balances::is_dead_account(&6), false); }); } @@ -417,7 +394,7 @@ macro_rules! decl_tests { fn refunding_balance_should_work() { <$ext_builder>::default().build().execute_with(|| { let _ = Balances::deposit_creating(&1, 42); - Balances::mutate_account(&1, |a| a.reserved = 69); + assert!(Balances::mutate_account(&1, |a| a.reserved = 69).is_ok()); Balances::unreserve(&1, 69); assert_eq!(Balances::free_balance(1), 111); assert_eq!(Balances::reserved_balance(1), 0); @@ -492,7 +469,7 @@ macro_rules! decl_tests { assert_ok!(Balances::repatriate_reserved(&1, &2, 41, Status::Free), 0); assert_eq!( last_event(), - Event::balances(RawEvent::ReserveRepatriated(1, 2, 41, Status::Free)), + Event::pallet_balances(crate::Event::ReserveRepatriated(1, 2, 41, Status::Free)), ); assert_eq!(Balances::reserved_balance(1), 69); assert_eq!(Balances::free_balance(1), 0); @@ -623,22 +600,30 @@ macro_rules! 
decl_tests { Balances::transfer_keep_alive(Some(1).into(), 2, 100), Error::<$test, _>::KeepAlive ); - assert_eq!(Balances::is_dead_account(&1), false); assert_eq!(Balances::total_balance(&1), 100); assert_eq!(Balances::total_balance(&2), 0); }); } #[test] - #[should_panic = "the balance of any account should always be more than existential deposit."] + #[should_panic = "the balance of any account should always be at least the existential deposit."] fn cannot_set_genesis_value_below_ed() { ($existential_deposit).with(|v| *v.borrow_mut() = 11); let mut t = frame_system::GenesisConfig::default().build_storage::<$test>().unwrap(); - let _ = GenesisConfig::<$test> { + let _ = pallet_balances::GenesisConfig::<$test> { balances: vec![(1, 10)], }.assimilate_storage(&mut t).unwrap(); } + #[test] + #[should_panic = "duplicate balances in genesis."] + fn cannot_set_genesis_value_twice() { + let mut t = frame_system::GenesisConfig::default().build_storage::<$test>().unwrap(); + let _ = pallet_balances::GenesisConfig::<$test> { + balances: vec![(1, 10), (2, 20), (1, 15)], + }.assimilate_storage(&mut t).unwrap(); + } + #[test] fn dust_moves_between_free_and_reserved() { <$ext_builder>::default() @@ -686,7 +671,6 @@ macro_rules! decl_tests { // Reserve some free balance let _ = Balances::slash(&1, 1); // The account should be dead. - assert!(Balances::is_dead_account(&1)); assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -704,7 +688,7 @@ macro_rules! decl_tests { assert_eq!( last_event(), - Event::balances(RawEvent::Reserved(1, 10)), + Event::pallet_balances(crate::Event::Reserved(1, 10)), ); System::set_block_number(3); @@ -712,7 +696,7 @@ macro_rules! decl_tests { assert_eq!( last_event(), - Event::balances(RawEvent::Unreserved(1, 5)), + Event::pallet_balances(crate::Event::Unreserved(1, 5)), ); System::set_block_number(4); @@ -721,7 +705,7 @@ macro_rules! decl_tests { // should only unreserve 5 assert_eq!( last_event(), - Event::balances(RawEvent::Unreserved(1, 5)), + Event::pallet_balances(crate::Event::Unreserved(1, 5)), ); }); } @@ -737,9 +721,9 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::system(system::RawEvent::NewAccount(1)), - Event::balances(RawEvent::Endowed(1, 100)), - Event::balances(RawEvent::BalanceSet(1, 100, 0)), + Event::frame_system(system::Event::NewAccount(1)), + Event::pallet_balances(crate::Event::Endowed(1, 100)), + Event::pallet_balances(crate::Event::BalanceSet(1, 100, 0)), ] ); @@ -748,8 +732,8 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::balances(RawEvent::DustLost(1, 99)), - Event::system(system::RawEvent::KilledAccount(1)) + Event::pallet_balances(crate::Event::DustLost(1, 99)), + Event::frame_system(system::Event::KilledAccount(1)) ] ); }); @@ -758,7 +742,7 @@ macro_rules! decl_tests { #[test] fn emit_events_with_no_existential_deposit_suicide() { <$ext_builder>::default() - .existential_deposit(0) + .existential_deposit(1) .build() .execute_with(|| { assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 100, 0)); @@ -766,26 +750,216 @@ macro_rules! 
decl_tests { assert_eq!( events(), [ - Event::system(system::RawEvent::NewAccount(1)), - Event::balances(RawEvent::Endowed(1, 100)), - Event::balances(RawEvent::BalanceSet(1, 100, 0)), + Event::frame_system(system::Event::NewAccount(1)), + Event::pallet_balances(crate::Event::Endowed(1, 100)), + Event::pallet_balances(crate::Event::BalanceSet(1, 100, 0)), ] ); let _ = Balances::slash(&1, 100); - // no events - assert_eq!(events(), []); - - assert_ok!(System::suicide(Origin::signed(1))); - assert_eq!( events(), [ - Event::system(system::RawEvent::KilledAccount(1)) + Event::frame_system(system::Event::KilledAccount(1)) ] ); }); } + + #[test] + fn slash_loop_works() { + <$ext_builder>::default() + .existential_deposit(100) + .build() + .execute_with(|| { + /* User has no reference counter, so they can die in these scenarios */ + + // SCENARIO: Slash would not kill account. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + // Slashed completed in full + assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Slash will kill account because not enough balance left. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + // Slashed completed in full + assert_eq!(Balances::slash(&1, 950), (NegativeImbalance::new(950), 0)); + // Account is killed + assert!(!System::account_exists(&1)); + + // SCENARIO: Over-slash will kill account, and report missing slash amount. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + // Slashed full free_balance, and reports 300 not slashed + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1000), 300)); + // Account is dead + assert!(!System::account_exists(&1)); + + // SCENARIO: Over-slash can take from reserved, but keep alive. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 400)); + // Slashed full free_balance and 300 of reserved balance + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash can take from reserved, and kill. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 350)); + // Slashed full free_balance and 300 of reserved balance + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); + // Account is dead because 50 reserved balance is not enough to keep alive + assert!(!System::account_exists(&1)); + + // SCENARIO: Over-slash can take as much as possible from reserved, kill, and report missing amount. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 250)); + // Slashed full free_balance and 300 of reserved balance + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1250), 50)); + // Account is super dead + assert!(!System::account_exists(&1)); + + /* User will now have a reference counter on them, keeping them alive in these scenarios */ + + // SCENARIO: Slash would not kill account. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests + // Slashed completed in full + assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Slash will take as much as possible without killing account. 
+ assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + // Slashed completed in full + assert_eq!(Balances::slash(&1, 950), (NegativeImbalance::new(900), 50)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash will not kill account, and report missing slash amount. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + // Slashed full free_balance minus ED, and reports 400 not slashed + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(900), 400)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash can take from reserved, but keep alive. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 400)); + // Slashed full free_balance and 300 of reserved balance + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash can take from reserved, but keep alive. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 350)); + // Slashed full free_balance and 250 of reserved balance to leave ED + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1250), 50)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash can take as much as possible from reserved and report missing amount. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 250)); + // Slashed full free_balance and 300 of reserved balance + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1150), 150)); + // Account is still alive + assert!(System::account_exists(&1)); + + // Slash on non-existent account is okay. + assert_eq!(Balances::slash(&12345, 1_300), (NegativeImbalance::new(0), 1300)); + }); + } + + #[test] + fn slash_reserved_loop_works() { + <$ext_builder>::default() + .existential_deposit(100) + .build() + .execute_with(|| { + /* User has no reference counter, so they can die in these scenarios */ + + // SCENARIO: Slash would not kill account. + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(900), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Slash would kill account. + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 1_000), (NegativeImbalance::new(1_000), 0)); + // Account is dead + assert!(!System::account_exists(&1)); + + // SCENARIO: Over-slash would kill account, and reports left over slash. + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); + // Account is dead + assert!(!System::account_exists(&1)); + + // SCENARIO: Over-slash does not take from free balance. + assert_ok!(Balances::set_balance(Origin::root(), 1, 300, 1_000)); + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); + // Account is alive because of free balance + assert!(System::account_exists(&1)); + + /* User has a reference counter, so they cannot die */ + + // SCENARIO: Slash would not kill account. 
+ assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(900), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Slash as much as possible without killing. + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + // Slashed as much as possible + assert_eq!(Balances::slash_reserved(&1, 1_000), (NegativeImbalance::new(950), 50)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash reports correctly, where reserved is needed to keep alive. + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + // Slashed as much as possible + assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(950), 350)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash reports correctly, where full reserved is removed. + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 1_000)); + // Slashed as much as possible + assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); + // Account is still alive + assert!(System::account_exists(&1)); + + // Slash on non-existent account is okay. + assert_eq!(Balances::slash_reserved(&12345, 1_300), (NegativeImbalance::new(0), 1300)); + }); + } + + #[test] + fn operations_on_dead_account_should_not_change_state() { + // These functions all use `mutate_account` which may introduce a storage change when + // the account never existed to begin with, and shouldn't exist in the end. + <$ext_builder>::default() + .existential_deposit(0) + .build() + .execute_with(|| { + assert!(!frame_system::Account::::contains_key(&1337)); + + // Unreserve + assert_storage_noop!(assert_eq!(Balances::unreserve(&1337, 42), 42)); + // Reserve + assert_noop!(Balances::reserve(&1337, 42), Error::::InsufficientBalance); + // Slash Reserve + assert_storage_noop!(assert_eq!(Balances::slash_reserved(&1337, 42).1, 42)); + // Repatriate Reserve + assert_noop!(Balances::repatriate_reserved(&1337, &1338, 42, Status::Free), Error::::DeadAccount); + // Slash + assert_storage_noop!(assert_eq!(Balances::slash(&1337, 42).1, 42)); + }); + } } } diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 88b73b47273eb..14dfd0c4b33d6 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
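// A minimal plain-Rust model of what `assert_storage_noop!` checks in the
// `operations_on_dead_account_should_not_change_state` test above: snapshot
// the state, run the operation, and require the state to be unchanged. A
// BTreeMap stands in for runtime storage here.
use std::collections::BTreeMap;

fn is_storage_noop<R>(
    state: &mut BTreeMap<u64, u128>,
    op: impl FnOnce(&mut BTreeMap<u64, u128>) -> R,
) -> bool {
    let before = state.clone();
    let _ = op(state);
    *state == before
}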
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,59 +20,47 @@ #![cfg(test)] use sp_runtime::{ - Perbill, traits::IdentityLookup, testing::Header, }; use sp_core::H256; use sp_io; -use frame_support::{impl_outer_origin, impl_outer_event, parameter_types}; -use frame_support::traits::Get; +use frame_support::parameter_types; use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; use pallet_transaction_payment::CurrencyAdapter; -use std::cell::RefCell; -use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; - -use frame_system as system; -impl_outer_origin!{ - pub enum Origin for Test {} -} - -mod balances { - pub use crate::Event; -} +use crate::{ + self as pallet_balances, + Module, Config, decl_tests, +}; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -impl_outer_event! { - pub enum Event for Test { - system, - balances, +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, } -} +); -thread_local! { - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); -} - -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } -} - -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); + pub static ExistentialDeposit: u64 = 0; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = CallWithDispatchInfo; + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; @@ -80,36 +68,30 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = super::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! 
{ pub const TransactionByteFee: u64 = 1; } -impl pallet_transaction_payment::Trait for Test { +impl pallet_transaction_payment::Config for Test { type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } -impl Trait for Test { +impl Config for Test { type Balance = u64; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; - type AccountStore = system::Module; + type AccountStore = frame_system::Pallet; type MaxLocks = (); type WeightInfo = (); } @@ -141,7 +123,7 @@ impl ExtBuilder { pub fn build(self) -> sp_io::TestExternalities { self.set_associated_consts(); let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { + pallet_balances::GenesisConfig:: { balances: if self.monied { vec![ (1, 10 * self.existential_deposit), diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 319fb3640b4c7..ffefc6c4d88fc 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,59 +20,49 @@ #![cfg(test)] use sp_runtime::{ - Perbill, traits::IdentityLookup, testing::Header, }; use sp_core::H256; use sp_io; -use frame_support::{impl_outer_origin, impl_outer_event, parameter_types}; -use frame_support::traits::{Get, StorageMapShim}; +use frame_support::parameter_types; +use frame_support::traits::StorageMapShim; use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; -use std::cell::RefCell; -use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; +use crate::{ + self as pallet_balances, + Module, Config, decl_tests, +}; use pallet_transaction_payment::CurrencyAdapter; -use frame_system as system; -impl_outer_origin!{ - pub enum Origin for Test {} -} - -mod balances { - pub use crate::Event; -} - -impl_outer_event! { - pub enum Event for Test { - system, - balances, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, } -} +); -thread_local! { - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); -} - -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } -} - -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); + pub static ExistentialDeposit: u64 = 0; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = CallWithDispatchInfo; + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; @@ -80,24 +70,18 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); - type AccountData = super::AccountData; + type PalletInfo = PalletInfo; + type AccountData = (); type OnNewAccount = (); - type OnKilledAccount = Module; + type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const TransactionByteFee: u64 = 1; } -impl pallet_transaction_payment::Trait for Test { +impl pallet_transaction_payment::Config for Test { type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; @@ -106,16 +90,16 @@ impl pallet_transaction_payment::Trait for Test { parameter_types! 
{ pub const MaxLocks: u32 = 50; } -impl Trait for Test { +impl Config for Test { type Balance = u64; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = StorageMapShim< super::Account, - system::CallOnCreatedAccount, - system::CallKillAccount, - u64, super::AccountData + system::Provider, + u64, + super::AccountData, >; type MaxLocks = MaxLocks; type WeightInfo = (); @@ -151,7 +135,7 @@ impl ExtBuilder { pub fn build(self) -> sp_io::TestExternalities { self.set_associated_consts(); let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { + pallet_balances::GenesisConfig:: { balances: if self.monied { vec![ (1, 10 * self.existential_deposit), @@ -176,7 +160,7 @@ decl_tests!{ Test, ExtBuilder, EXISTENTIAL_DEPOSIT } #[test] fn emit_events_with_no_existential_deposit_suicide_with_dust() { ::default() - .existential_deposit(0) + .existential_deposit(2) .build() .execute_with(|| { assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 100, 0)); @@ -184,24 +168,24 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!( events(), [ - Event::system(system::RawEvent::NewAccount(1)), - Event::balances(RawEvent::Endowed(1, 100)), - Event::balances(RawEvent::BalanceSet(1, 100, 0)), + Event::frame_system(system::Event::NewAccount(1)), + Event::pallet_balances(crate::Event::Endowed(1, 100)), + Event::pallet_balances(crate::Event::BalanceSet(1, 100, 0)), ] ); - let _ = Balances::slash(&1, 99); + let _ = Balances::slash(&1, 98); // no events assert_eq!(events(), []); - assert_ok!(System::suicide(Origin::signed(1))); + let _ = Balances::slash(&1, 1); assert_eq!( events(), [ - Event::balances(RawEvent::DustLost(1, 1)), - Event::system(system::RawEvent::KilledAccount(1)) + Event::pallet_balances(crate::Event::DustLost(1, 1)), + Event::frame_system(system::Event::KilledAccount(1)) ] ); }); diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index 45e4195f962d1..2b69c9c11d599 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_balances +//! Autogenerated weights for pallet_balances +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2021-01-06, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -48,76 +49,63 @@ pub trait WeightInfo { fn set_balance_creating() -> Weight; fn set_balance_killing() -> Weight; fn force_transfer() -> Weight; - } /// Weights for pallet_balances using the Substrate node and recommended hardware. 
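// A minimal plain-Rust sketch of how the figures below are composed: each
// dispatchable's weight is a benchmarked base cost plus the configured
// per-read / per-write database weights, scaled by how many storage reads and
// writes the call performs. All constants in the example are assumptions.
fn compose_weight(base: u64, reads: u64, writes: u64, db_read: u64, db_write: u64) -> u64 {
    base.saturating_add(reads.saturating_mul(db_read))
        .saturating_add(writes.saturating_mul(db_write))
}
// e.g. a transfer-like call with 1 read and 1 write:
//   compose_weight(100_698_000, 1, 1, 25_000_000, 100_000_000)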
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn transfer() -> Weight { - (94_088_000 as Weight) + (100_698_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn transfer_keep_alive() -> Weight { - (64_828_000 as Weight) + (69_407_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn set_balance_creating() -> Weight { - (36_151_000 as Weight) + (38_489_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn set_balance_killing() -> Weight { - (45_505_000 as Weight) + (48_458_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn force_transfer() -> Weight { - (92_986_000 as Weight) + (99_320_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn transfer() -> Weight { - (94_088_000 as Weight) + (100_698_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn transfer_keep_alive() -> Weight { - (64_828_000 as Weight) + (69_407_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn set_balance_creating() -> Weight { - (36_151_000 as Weight) + (38_489_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn set_balance_killing() -> Weight { - (45_505_000 as Weight) + (48_458_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn force_transfer() -> Weight { - (92_986_000 as Weight) + (99_320_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } - } diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 924ffc8627abc..41ab9efeced02 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-benchmarking" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,20 +13,21 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -linregress = "0.1" -paste = "0.1" -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -sp-api = { version = "2.0.0", path = "../../primitives/api", default-features = false } -sp-runtime-interface = { version = "2.0.0", path = "../../primitives/runtime-interface", default-features = false } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime", default-features = false } -sp-std = { version = "2.0.0", path = "../../primitives/std", default-features = false } -sp-io = { version = "2.0.0", path = "../../primitives/io", default-features = false } -sp-storage = { version = "2.0.0", path = "../../primitives/storage", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +linregress = { version = 
"0.4.0", optional = true } +paste = "1.0" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +sp-api = { version = "3.0.0", path = "../../primitives/api", default-features = false } +sp-runtime-interface = { version = "3.0.0", path = "../../primitives/runtime-interface", default-features = false } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime", default-features = false } +sp-std = { version = "3.0.0", path = "../../primitives/std", default-features = false } +sp-io = { version = "3.0.0", path = "../../primitives/io", default-features = false } +sp-storage = { version = "3.0.0", path = "../../primitives/storage", default-features = false } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] hex-literal = "0.3.1" +serde = "1.0.101" [features] default = [ "std" ] @@ -38,4 +39,5 @@ std = [ "sp-std/std", "frame-support/std", "frame-system/std", + "linregress", ] diff --git a/frame/benchmarking/README.md b/frame/benchmarking/README.md index 1727072709b21..38c683cb8db5b 100644 --- a/frame/benchmarking/README.md +++ b/frame/benchmarking/README.md @@ -43,7 +43,7 @@ The benchmarking framework comes with the following tools: * [A set of macros](./src/lib.rs) (`benchmarks!`, `add_benchmark!`, etc...) to make it easy to write, test, and add runtime benchmarks. * [A set of linear regression analysis functions](./src/analysis.rs) for processing benchmark data. -* [A CLI extension](../../utils/benchmarking-cli/) to make it easy to execute benchmarks on your +* [A CLI extension](../../utils/frame/benchmarking-cli/) to make it easy to execute benchmarks on your node. The end-to-end benchmarking pipeline is disabled by default when compiling a node. If you want to diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index dafb4a74b669f..bdfa1cf65c47f 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,9 +18,11 @@ //! Tools for analyzing the benchmark results. use std::collections::BTreeMap; -use linregress::{FormulaRegressionBuilder, RegressionDataBuilder, RegressionModel}; +use linregress::{FormulaRegressionBuilder, RegressionDataBuilder}; use crate::BenchmarkResults; +pub use linregress::RegressionModel; + pub struct Analysis { pub base: u128, pub slopes: Vec, diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 284b0545d03a5..d2cba9cc70973 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,7 +26,7 @@ mod analysis; pub use utils::*; #[cfg(feature = "std")] -pub use analysis::{Analysis, BenchmarkSelector}; +pub use analysis::{Analysis, BenchmarkSelector, RegressionModel}; #[doc(hidden)] pub use sp_io::storage::root as storage_root; pub use sp_runtime::traits::Zero; @@ -68,10 +68,6 @@ pub use sp_storage::TrackedStorageKey; /// for arbitrary expresions to be evaluated in a benchmark (including for example, /// `on_initialize`). /// -/// The macro allows for common parameters whose ranges and instancing expressions may be drawn upon -/// (or not) by each arm. Syntax is available to allow for only the range to be drawn upon if -/// desired, allowing an alternative instancing expression to be given. -/// /// Note that the ranges are *inclusive* on both sides. This is in contrast to ranges in Rust which /// are left-inclusive right-exclusive. /// @@ -80,39 +76,29 @@ pub use sp_storage::TrackedStorageKey; /// at any time. Local variables are shared between the two pre- and post- code blocks, but do not /// leak from the interior of any instancing expressions. /// -/// Any common parameters that are unused in an arm do not have their instancing expressions -/// evaluated. -/// /// Example: /// ```ignore /// benchmarks! { /// where_clause { where T::A: From } // Optional line to give additional bound on `T`. /// -/// // common parameter; just one for this example. -/// // will be `1`, `MAX_LENGTH` or any value inbetween -/// _ { -/// let l in 1 .. MAX_LENGTH => initialize_l(l); -/// } -/// /// // first dispatchable: foo; this is a user dispatchable and operates on a `u8` vector of -/// // size `l`, which we allow to be initialized as usual. +/// // size `l` /// foo { /// let caller = account::(b"caller", 0, benchmarks_seed); -/// let l = ...; +/// let l in 1 .. MAX_LENGTH => initialize_l(l); /// }: _(Origin::Signed(caller), vec![0u8; l]) /// /// // second dispatchable: bar; this is a root dispatchable and accepts a `u8` vector of size -/// // `l`. We don't want it pre-initialized like before so we override using the `=> ()` notation. +/// // `l`. /// // In this case, we explicitly name the call using `bar` instead of `_`. /// bar { -/// let l = _ .. _ => (); +/// let l in 1 .. MAX_LENGTH => initialize_l(l); /// }: bar(Origin::Root, vec![0u8; l]) /// /// // third dispatchable: baz; this is a user dispatchable. It isn't dependent on length like the /// // other two but has its own complexity `c` that needs setting up. It uses `caller` (in the /// // pre-instancing block) within the code block. This is only allowed in the param instancers -/// // of arms. Instancers of common params cannot optimistically draw upon hypothetical variables -/// // that the arm's pre-instancing code block might have declared. +/// // of arms. /// baz1 { /// let caller = account::(b"caller", 0, benchmarks_seed); /// let c = 0 .. 10 => setup_c(&caller, c); @@ -137,7 +123,7 @@ pub use sp_storage::TrackedStorageKey; /// /// Test functions are automatically generated for each benchmark and are accessible to you when you /// run `cargo test`. All tests are named `test_benchmark_`, expect you to pass them -/// the Runtime Trait, and run them in a test externalities environment. The test function runs your +/// the Runtime Config, and run them in a test externalities environment. 
The test function runs your /// benchmark just like a regular benchmark, but only testing at the lowest and highest values for /// each component. The function will return `Ok(())` if the benchmarks return no errors. /// @@ -176,18 +162,11 @@ pub use sp_storage::TrackedStorageKey; #[macro_export] macro_rules! benchmarks { ( - $( where_clause { where $( $where_ty:ty: $where_bound:path ),* $(,)? } )? - _ { - $( - let $common:ident in $common_from:tt .. $common_to:expr => $common_instancer:expr; - )* - } $( $rest:tt )* ) => { $crate::benchmarks_iter!( { } - { $( $( $where_ty: $where_bound ),* )? } - { $( { $common , $common_from , $common_to , $common_instancer } )* } + { } ( ) ( ) $( $rest )* @@ -199,18 +178,11 @@ macro_rules! benchmarks { #[macro_export] macro_rules! benchmarks_instance { ( - $( where_clause { where $( $where_ty:ty: $where_bound:path ),* $(,)? } )? - _ { - $( - let $common:ident in $common_from:tt .. $common_to:expr => $common_instancer:expr; - )* - } $( $rest:tt )* ) => { $crate::benchmarks_iter!( { I } - { $( $( $where_ty: $where_bound ),* )? } - { $( { $common , $common_from , $common_to , $common_instancer } )* } + { } ( ) ( ) $( $rest )* @@ -221,11 +193,27 @@ macro_rules! benchmarks_instance { #[macro_export] #[doc(hidden)] macro_rules! benchmarks_iter { + // detect and extract where clause: + ( + { $( $instance:ident )? } + { $( $where_clause:tt )* } + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + where_clause { where $( $where_ty:ty: $where_bound:path ),* $(,)? } + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + { $( $instance)? } + { $( $where_ty: $where_bound ),* } + ( $( $names )* ) + ( $( $names_extra )* ) + $( $rest )* + } + }; // detect and extract extra tag: ( { $( $instance:ident )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) #[extra] @@ -235,7 +223,6 @@ macro_rules! benchmarks_iter { $crate::benchmarks_iter! { { $( $instance)? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* ) ( $( $names_extra )* $name ) $name @@ -246,7 +233,6 @@ macro_rules! benchmarks_iter { ( { $( $instance:ident )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) // This contains $( $( { $instance } )? $name:ident )* ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) @@ -256,7 +242,6 @@ macro_rules! benchmarks_iter { $crate::benchmarks_iter! { { $( $instance)? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* ) ( $( $names_extra )* ) $name { $( $code )* }: $name ( $origin $( , $arg )* ) @@ -268,7 +253,6 @@ macro_rules! benchmarks_iter { ( { $( $instance:ident )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) @@ -278,7 +262,6 @@ macro_rules! benchmarks_iter { $crate::benchmarks_iter! { { $( $instance)? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* ) ( $( $names_extra )* ) $name { $( $code )* }: { @@ -296,7 +279,6 @@ macro_rules! benchmarks_iter { ( { $( $instance:ident )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: $eval:block @@ -307,7 +289,6 @@ macro_rules! benchmarks_iter { { $( $instance)? } $name { $( $where_clause )* } - { $( $common )* } { } { $eval } { $( $code )* } @@ -324,7 +305,6 @@ macro_rules! benchmarks_iter { $crate::benchmarks_iter!( { $( $instance)? 
} { $( $where_clause )* } - { $( $common )* } ( $( $names )* { $( $instance )? } $name ) ( $( $names_extra )* ) $( $rest )* @@ -334,7 +314,6 @@ macro_rules! benchmarks_iter { ( { $( $instance:ident )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ) => { @@ -354,7 +333,6 @@ macro_rules! benchmarks_iter { ( { $( $instance:ident )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) @@ -363,7 +341,6 @@ macro_rules! benchmarks_iter { $crate::benchmarks_iter! { { $( $instance)? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* ) ( $( $names_extra )* ) $name { $( $code )* }: _ ( $origin $( , $arg )* ) @@ -375,7 +352,6 @@ macro_rules! benchmarks_iter { ( { $( $instance:ident )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) @@ -384,7 +360,6 @@ macro_rules! benchmarks_iter { $crate::benchmarks_iter! { { $( $instance)? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* ) ( $( $names_extra )* ) $name { $( $code )* }: $dispatch ( $origin $( , $arg )* ) @@ -396,7 +371,6 @@ macro_rules! benchmarks_iter { ( { $( $instance:ident )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: $eval:block @@ -405,7 +379,6 @@ macro_rules! benchmarks_iter { $crate::benchmarks_iter!( { $( $instance)? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* ) ( $( $names_extra )* ) $name { $( $code )* }: $eval @@ -423,7 +396,6 @@ macro_rules! benchmark_backend { { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } - { $( $common:tt )* } { $( PRE { $( $pre_parsed:tt )* } )* } { $eval:block } { @@ -436,7 +408,6 @@ macro_rules! benchmark_backend { { $( $instance)? } $name { $( $where_clause )* } - { $( $common )* } { $( PRE { $( $pre_parsed )* } )* PRE { $pre_id , $pre_ty , $pre_ex } @@ -450,7 +421,6 @@ macro_rules! benchmark_backend { { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } - { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { @@ -463,7 +433,6 @@ macro_rules! benchmark_backend { { $( $instance)? } $name { $( $where_clause )* } - { $( $common )* } { $( $parsed )* PARAM { $param , $param_from , $param_to , $param_instancer } @@ -473,74 +442,11 @@ macro_rules! benchmark_backend { $postcode } }; - // mutation arm to look after defaulting to a common param - ( - { $( $instance:ident )? } - $name:ident - { $( $where_clause:tt )* } - { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } - { $( $parsed:tt )* } - { $eval:block } - { - let $param:ident in ...; - $( $rest:tt )* - } - $postcode:block - ) => { - $crate::benchmark_backend! { - { $( $instance)? } - $name - { $( $where_clause )* } - { $( { $common , $common_from , $common_to , $common_instancer } )* } - { $( $parsed )* } - { $eval } - { - let $param - in ({ $( let $common = $common_from; )* $param }) - .. ({ $( let $common = $common_to; )* $param }) - => ({ $( let $common = || -> Result<(), &'static str> { $common_instancer ; Ok(()) }; )* $param()? }); - $( $rest )* - } - $postcode - } - }; - // mutation arm to look after defaulting only the range to common param - ( - { $( $instance:ident )? 
} - $name:ident - { $( $where_clause:tt )* } - { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } - { $( $parsed:tt )* } - { $eval:block } - { - let $param:ident in _ .. _ => $param_instancer:expr ; - $( $rest:tt )* - } - $postcode:block - ) => { - $crate::benchmark_backend! { - { $( $instance)? } - $name - { $( $where_clause )* } - { $( { $common , $common_from , $common_to , $common_instancer } )* } - { $( $parsed )* } - { $eval } - { - let $param - in ({ $( let $common = $common_from; )* $param }) - .. ({ $( let $common = $common_to; )* $param }) - => $param_instancer ; - $( $rest )* - } - $postcode - } - }; // mutation arm to look after a single tt for param_from. ( { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } - { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { @@ -553,7 +459,6 @@ macro_rules! benchmark_backend { { $( $instance)? } $name { $( $where_clause )* } - { $( $common )* } { $( $parsed )* } { $eval } { @@ -568,7 +473,6 @@ macro_rules! benchmark_backend { { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } - { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { @@ -581,7 +485,6 @@ macro_rules! benchmark_backend { { $( $instance)? } $name { $( $where_clause )* } - { $( $common )* } { $( $parsed )* } { $eval } { @@ -596,7 +499,6 @@ macro_rules! benchmark_backend { { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } - { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { @@ -609,7 +511,6 @@ macro_rules! benchmark_backend { { $( $instance)? } $name { $( $where_clause )* } - { $( $common )* } { $( $parsed )* } { $eval } { @@ -624,7 +525,6 @@ macro_rules! benchmark_backend { { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } - { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } { $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* $( PARAM { $param:ident , $param_from:expr , $param_to:expr , $param_instancer:expr } )* @@ -636,7 +536,7 @@ macro_rules! benchmark_backend { #[allow(non_camel_case_types)] struct $name; #[allow(unused_variables)] - impl, I: Instance)? > + impl, I: Instance)? > $crate::BenchmarkingSetup for $name where $( $where_clause )* { @@ -653,9 +553,6 @@ macro_rules! benchmark_backend { components: &[($crate::BenchmarkParameter, u32)], verify: bool ) -> Result Result<(), &'static str>>, &'static str> { - $( - let $common = $common_from; - )* $( // Prepare instance let $param = components.iter() @@ -710,7 +607,7 @@ macro_rules! selected_benchmark { } // Allow us to select a benchmark from the list of available benchmarks. - impl, I: Instance )? > + impl, I: Instance )? > $crate::BenchmarkingSetup for SelectedBenchmark where $( $where_clause )* { @@ -750,9 +647,9 @@ macro_rules! impl_benchmark { ( $( { $( $name_inst:ident )? } $name:ident )* ) ( $( $name_extra:ident ),* ) ) => { - impl, I: Instance)? > + impl, I: Instance)? > $crate::Benchmarking<$crate::BenchmarkResults> for Module - where T: frame_system::Trait, $( $where_clause )* + where T: frame_system::Config, $( $where_clause )* { fn benchmarks(extra: bool) -> Vec<&'static [u8]> { let mut all = vec![ $( stringify!($name).as_ref() ),* ]; @@ -948,8 +845,8 @@ macro_rules! impl_benchmark_test { $name:ident ) => { $crate::paste::item! 
{ - fn [] () -> Result<(), &'static str> - where T: frame_system::Trait, $( $where_clause )* + fn [] () -> Result<(), &'static str> + where T: frame_system::Config, $( $where_clause )* { let selected_benchmark = SelectedBenchmark::$name; let components = < @@ -1006,6 +903,39 @@ macro_rules! impl_benchmark_test { }; } +/// show error message and debugging info for the case of an error happening +/// during a benchmark +pub fn show_benchmark_debug_info( + instance_string: &[u8], + benchmark: &[u8], + lowest_range_values: &sp_std::prelude::Vec, + highest_range_values: &sp_std::prelude::Vec, + steps: &sp_std::prelude::Vec, + repeat: &u32, + verify: &bool, + error_message: &str, +) -> sp_runtime::RuntimeString { + sp_runtime::format_runtime_string!( + "\n* Pallet: {}\n\ + * Benchmark: {}\n\ + * Lowest_range_values: {:?}\n\ + * Highest_range_values: {:?}\n\ + * Steps: {:?}\n\ + * Repeat: {:?}\n\ + * Verify: {:?}\n\ + * Error message: {}", + sp_std::str::from_utf8(instance_string) + .expect("it's all just strings ran through the wasm interface. qed"), + sp_std::str::from_utf8(benchmark) + .expect("it's all just strings ran through the wasm interface. qed"), + lowest_range_values, + highest_range_values, + steps, + repeat, + verify, + error_message, + ) +} /// This macro adds pallet benchmarks to a `Vec` object. /// @@ -1052,10 +982,29 @@ macro_rules! impl_benchmark_test { /// ``` /// /// At the end of `dispatch_benchmark`, you should return this batches object. +/// +/// In the case where you have multiple instances of a pallet that you need to separately benchmark, +/// the name of your module struct will be used as a suffix to your outputted weight file. For +/// example: +/// +/// ```ignore +/// add_benchmark!(params, batches, pallet_balances, Balances); // pallet_balances.rs +/// add_benchmark!(params, batches, pallet_collective, Council); // pallet_collective_council.rs +/// add_benchmark!(params, batches, pallet_collective, TechnicalCommittee); // pallet_collective_technical_committee.rs +/// ``` +/// +/// You can manipulate this suffixed string by using a type alias if needed. For example: +/// +/// ```ignore +/// type Council2 = TechnicalCommittee; +/// add_benchmark!(params, batches, pallet_collective, Council2); // pallet_collective_council_2.rs +/// ``` + #[macro_export] macro_rules! add_benchmark { - ( $params:ident, $batches:ident, $name:ident, $( $location:tt )* ) => ( + ( $params:ident, $batches:ident, $name:path, $( $location:tt )* ) => ( let name_string = stringify!($name).as_bytes(); + let instance_string = stringify!( $( $location )* ).as_bytes(); let (config, whitelist) = $params; let $crate::BenchmarkConfig { pallet, @@ -1071,6 +1020,9 @@ macro_rules! add_benchmark { if &pallet[..] == &b"*"[..] || &benchmark[..] == &b"*"[..] { for benchmark in $( $location )*::benchmarks(*extra).into_iter() { $batches.push($crate::BenchmarkBatch { + pallet: name_string.to_vec(), + instance: instance_string.to_vec(), + benchmark: benchmark.to_vec(), results: $( $location )*::run_benchmark( benchmark, &lowest_range_values[..], @@ -1079,13 +1031,25 @@ macro_rules! 
add_benchmark { *repeat, whitelist, *verify, - )?, - pallet: name_string.to_vec(), - benchmark: benchmark.to_vec(), + ).map_err(|e| { + $crate::show_benchmark_debug_info( + instance_string, + benchmark, + lowest_range_values, + highest_range_values, + steps, + repeat, + verify, + e, + ) + })?, }); } } else { $batches.push($crate::BenchmarkBatch { + pallet: name_string.to_vec(), + instance: instance_string.to_vec(), + benchmark: benchmark.clone(), results: $( $location )*::run_benchmark( &benchmark[..], &lowest_range_values[..], @@ -1094,9 +1058,18 @@ macro_rules! add_benchmark { *repeat, whitelist, *verify, - )?, - pallet: name_string.to_vec(), - benchmark: benchmark.clone(), + ).map_err(|e| { + $crate::show_benchmark_debug_info( + instance_string, + benchmark, + lowest_range_values, + highest_range_values, + steps, + repeat, + verify, + e, + ) + })?, }); } } diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 0429d98e18618..53093fdf062d3 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,240 +21,267 @@ use super::*; use sp_std::prelude::*; -use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::{H256, Header}}; -use frame_support::{ - dispatch::DispatchResult, - decl_module, decl_storage, impl_outer_origin, assert_ok, assert_err, ensure -}; -use frame_system::{RawOrigin, ensure_signed, ensure_none}; - -decl_storage! { - trait Store for Module as Test where - ::OtherEvent: Into<::Event> - { - Value get(fn value): Option; - } -} +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::{H256, Header}, BuildStorage}; +use frame_support::parameter_types; -decl_module! { - pub struct Module for enum Call where - origin: T::Origin, ::OtherEvent: Into<::Event> - { - #[weight = 0] - fn set_value(origin, n: u32) -> DispatchResult { - let _sender = ensure_signed(origin)?; - Value::put(n); - Ok(()) +mod pallet_test { + use frame_support::pallet_prelude::Get; + + frame_support::decl_storage! { + trait Store for Module as Test where + ::OtherEvent: Into<::Event> + { + pub Value get(fn value): Option; } + } - #[weight = 0] - fn dummy(origin, _n: u32) -> DispatchResult { - let _sender = ensure_none(origin)?; - Ok(()) + frame_support::decl_module! { + pub struct Module for enum Call where + origin: T::Origin, ::OtherEvent: Into<::Event> + { + #[weight = 0] + fn set_value(origin, n: u32) -> frame_support::dispatch::DispatchResult { + let _sender = frame_system::ensure_signed(origin)?; + Value::put(n); + Ok(()) + } + + #[weight = 0] + fn dummy(origin, _n: u32) -> frame_support::dispatch::DispatchResult { + let _sender = frame_system::ensure_none(origin)?; + Ok(()) + } } } -} -impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} -} + pub trait OtherConfig { + type OtherEvent; + } -pub trait OtherTrait { - type OtherEvent; + pub trait Config: frame_system::Config + OtherConfig + where Self::OtherEvent: Into<::Event> + { + type Event; + type LowerBound: Get; + type UpperBound: Get; + } } -pub trait Trait: frame_system::Trait + OtherTrait - where Self::OtherEvent: Into<::Event> -{ - type Event; -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -#[derive(Clone, Eq, PartialEq)] -pub struct Test; +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + TestPallet: pallet_test::{Module, Call, Storage}, + } +); -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = (); - type MaximumBlockWeight = (); - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); - type MaximumBlockLength = (); - type AvailableBlockRatio = (); type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } -impl Trait for Test { - type Event = (); +parameter_types!{ + pub const LowerBound: u32 = 1; + pub const UpperBound: u32 = 100; } -impl OtherTrait for Test { - type OtherEvent = (); +impl pallet_test::Config for Test { + type Event = Event; + type LowerBound = LowerBound; + type UpperBound = UpperBound; } -fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() +impl pallet_test::OtherConfig for Test { + type OtherEvent = Event; } -benchmarks!{ - where_clause { where ::OtherEvent: Into<::Event> } - - _ { - // Define a common range for `b`. - let b in 1 .. 1000 => (); - } +fn new_test_ext() -> sp_io::TestExternalities { + GenesisConfig::default().build_storage().unwrap().into() +} - set_value { - let b in ...; - let caller = account::("caller", 0, 0); - }: _ (RawOrigin::Signed(caller), b.into()) - verify { - assert_eq!(Value::get(), Some(b)); - } +mod benchmarks { + use sp_std::prelude::*; + use frame_system::RawOrigin; + use super::{Test, pallet_test::{self, Value}, new_test_ext}; + use frame_support::{assert_ok, assert_err, ensure, traits::Get, StorageValue}; + use crate::{BenchmarkingSetup, BenchmarkParameter, account}; - other_name { - let b in ...; - }: dummy (RawOrigin::None, b.into()) + // Additional used internally by the benchmark macro. + use super::pallet_test::{Call, Config, Module}; - sort_vector { - let x in 1 .. 
10000; - let mut m = Vec::::new(); - for i in (0..x).rev() { - m.push(i); + crate::benchmarks!{ + where_clause { + where + ::OtherEvent: Into<::Event> } - }: { - m.sort(); - } verify { - ensure!(m[0] == 0, "You forgot to sort!") - } - - bad_origin { - let b in ...; - let caller = account::("caller", 0, 0); - }: dummy (RawOrigin::Signed(caller), b.into()) - bad_verify { - let x in 1 .. 10000; - let mut m = Vec::::new(); - for i in (0..x).rev() { - m.push(i); + set_value { + let b in 1 .. 1000; + let caller = account::("caller", 0, 0); + }: _ (RawOrigin::Signed(caller), b.into()) + verify { + assert_eq!(Value::get(), Some(b)); } - }: { } - verify { - ensure!(m[0] == 0, "You forgot to sort!") - } - - no_components { - let caller = account::("caller", 0, 0); - }: set_value(RawOrigin::Signed(caller), 0) -} - -#[test] -fn benchmarks_macro_works() { - // Check benchmark creation for `set_value`. - let selected = SelectedBenchmark::set_value; - let components = >::components(&selected); - assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); + other_name { + let b in 1 .. 1000; + }: dummy (RawOrigin::None, b.into()) + + sort_vector { + let x in 1 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { + m.sort(); + } verify { + ensure!(m[0] == 0, "You forgot to sort!") + } - let closure = >::instance( - &selected, - &[(BenchmarkParameter::b, 1)], - true, - ).expect("failed to create closure"); + bad_origin { + let b in 1 .. 1000; + let caller = account::("caller", 0, 0); + }: dummy (RawOrigin::Signed(caller), b.into()) + + bad_verify { + let x in 1 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { } + verify { + ensure!(m[0] == 0, "You forgot to sort!") + } - new_test_ext().execute_with(|| { - assert_ok!(closure()); - }); -} + no_components { + let caller = account::("caller", 0, 0); + }: set_value(RawOrigin::Signed(caller), 0) -#[test] -fn benchmarks_macro_rename_works() { - // Check benchmark creation for `other_dummy`. - let selected = SelectedBenchmark::other_name; - let components = >::components(&selected); - assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); + variable_components { + let b in ( T::LowerBound::get() ) .. T::UpperBound::get(); + }: dummy (RawOrigin::None, b.into()) + } - let closure = >::instance( - &selected, - &[(BenchmarkParameter::b, 1)], - true, - ).expect("failed to create closure"); + #[test] + fn benchmarks_macro_works() { + // Check benchmark creation for `set_value`. + let selected = SelectedBenchmark::set_value; - new_test_ext().execute_with(|| { - assert_ok!(closure()); - }); -} + let components = >::components(&selected); + assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); -#[test] -fn benchmarks_macro_works_for_non_dispatchable() { - let selected = SelectedBenchmark::sort_vector; + let closure = >::instance( + &selected, + &[(BenchmarkParameter::b, 1)], + true, + ).expect("failed to create closure"); - let components = >::components(&selected); - assert_eq!(components, vec![(BenchmarkParameter::x, 1, 10000)]); + new_test_ext().execute_with(|| { + assert_ok!(closure()); + }); + } - let closure = >::instance( - &selected, - &[(BenchmarkParameter::x, 1)], - true, - ).expect("failed to create closure"); + #[test] + fn benchmarks_macro_rename_works() { + // Check benchmark creation for `other_dummy`. 
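Stepping back from the macro internals and tests around here, the net effect on benchmark authors is that the removed `_ { ... }` common-parameter block means each arm now declares its component range inline. A hedged before/after sketch (pallet, names, and ranges are illustrative; the two invocations would not coexist in one module):

```rust
// Before: components declared once in a common block and referenced with `...`.
benchmarks! {
    _ {
        let b in 1 .. 1000 => ();
    }
    set_value {
        let b in ...;
        let caller = account::<T::AccountId>("caller", 0, 0);
    }: _(RawOrigin::Signed(caller), b.into())
}

// After: no common block; every arm spells out its own range (and optional instancer).
benchmarks! {
    set_value {
        let b in 1 .. 1000;
        let caller = account::<T::AccountId>("caller", 0, 0);
    }: _(RawOrigin::Signed(caller), b.into())
}
```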
+ let selected = SelectedBenchmark::other_name; + let components = >::components(&selected); + assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); + + let closure = >::instance( + &selected, + &[(BenchmarkParameter::b, 1)], + true, + ).expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_ok!(closure()); + }); + } - assert_ok!(closure()); -} + #[test] + fn benchmarks_macro_works_for_non_dispatchable() { + let selected = SelectedBenchmark::sort_vector; -#[test] -fn benchmarks_macro_verify_works() { - // Check postcondition for benchmark `set_value` is valid. - let selected = SelectedBenchmark::set_value; + let components = >::components(&selected); + assert_eq!(components, vec![(BenchmarkParameter::x, 1, 10000)]); - let closure = >::instance( - &selected, - &[(BenchmarkParameter::b, 1)], - true, - ).expect("failed to create closure"); + let closure = >::instance( + &selected, + &[(BenchmarkParameter::x, 1)], + true, + ).expect("failed to create closure"); - new_test_ext().execute_with(|| { assert_ok!(closure()); - }); - - // Check postcondition for benchmark `bad_verify` is invalid. - let selected = SelectedBenchmark::bad_verify; - - let closure = >::instance( - &selected, - &[(BenchmarkParameter::x, 10000)], - true, - ).expect("failed to create closure"); + } - new_test_ext().execute_with(|| { - assert_err!(closure(), "You forgot to sort!"); - }); -} + #[test] + fn benchmarks_macro_verify_works() { + // Check postcondition for benchmark `set_value` is valid. + let selected = SelectedBenchmark::set_value; + + let closure = >::instance( + &selected, + &[(BenchmarkParameter::b, 1)], + true, + ).expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_ok!(closure()); + }); + + // Check postcondition for benchmark `bad_verify` is invalid. + let selected = SelectedBenchmark::bad_verify; + + let closure = >::instance( + &selected, + &[(BenchmarkParameter::x, 10000)], + true, + ).expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_err!(closure(), "You forgot to sort!"); + }); + } -#[test] -fn benchmarks_generate_unit_tests() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_set_value::()); - assert_ok!(test_benchmark_other_name::()); - assert_ok!(test_benchmark_sort_vector::()); - assert_err!(test_benchmark_bad_origin::(), "Bad origin"); - assert_err!(test_benchmark_bad_verify::(), "You forgot to sort!"); - assert_ok!(test_benchmark_no_components::()); - }); + #[test] + fn benchmarks_generate_unit_tests() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_set_value::()); + assert_ok!(test_benchmark_other_name::()); + assert_ok!(test_benchmark_sort_vector::()); + assert_err!(test_benchmark_bad_origin::(), "Bad origin"); + assert_err!(test_benchmark_bad_verify::(), "You forgot to sort!"); + assert_ok!(test_benchmark_no_components::()); + assert_ok!(test_benchmark_variable_components::()); + }); + } } diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 042f4b707aef4..1574e47454b59 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
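The `pallet`/`instance`/`benchmark` fields that `add_benchmark!` now populates feed the `Benchmark` runtime API declared in the utils.rs diff here. A hedged sketch of how a runtime typically implements that API, usually inside its `impl_runtime_apis!` block (the `Runtime`, `Block`, and pallet names are illustrative and details vary between Substrate versions):

```rust
impl frame_benchmarking::Benchmark<Block> for Runtime {
    fn dispatch_benchmark(
        config: frame_benchmarking::BenchmarkConfig,
    ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
        use frame_benchmarking::{add_benchmark, BenchmarkBatch, TrackedStorageKey};

        // Storage keys whose accesses should not be charged to the benchmarked extrinsic.
        let whitelist: Vec<TrackedStorageKey> = vec![];
        let mut batches = Vec::<BenchmarkBatch>::new();
        let params = (&config, &whitelist);

        // One line per (pallet, instance); the instance name becomes part of the
        // generated weight file name, e.g. pallet_collective_council.rs.
        add_benchmark!(params, batches, pallet_balances, Balances);
        add_benchmark!(params, batches, pallet_collective, Council);

        if batches.is_empty() {
            return Err("Benchmark not found for this pallet.".into());
        }
        Ok(batches)
    }
}
```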
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,7 +20,6 @@ use codec::{Encode, Decode}; use sp_std::{vec::Vec, prelude::Box}; use sp_io::hashing::blake2_256; -use sp_runtime::RuntimeString; use sp_storage::TrackedStorageKey; /// An alphabet of possible parameters to use for benchmarking. @@ -43,6 +42,8 @@ impl std::fmt::Display for BenchmarkParameter { pub struct BenchmarkBatch { /// The pallet containing this benchmark. pub pallet: Vec, + /// The instance of this pallet being benchmarked. + pub instance: Vec, /// The extrinsic (or benchmark name) of this benchmark. pub benchmark: Vec, /// The results from this benchmark. @@ -88,7 +89,7 @@ sp_api::decl_runtime_apis! { /// Runtime api for benchmarking a FRAME runtime. pub trait Benchmark { /// Dispatch the given benchmark. - fn dispatch_benchmark(config: BenchmarkConfig) -> Result, RuntimeString>; + fn dispatch_benchmark(config: BenchmarkConfig) -> Result, sp_runtime::RuntimeString>; } } diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml new file mode 100644 index 0000000000000..ec4f1b94cd625 --- /dev/null +++ b/frame/bounties/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "pallet-bounties" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet to manage bounties" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { version = "1.0.101", optional = true, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +pallet-treasury = { version = "3.0.0", default-features = false, path = "../treasury" } + +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] +sp-io ={ version = "3.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-storage = { version = "3.0.0", path = "../../primitives/storage" } +pallet-balances = { version = "3.0.0", path = "../balances" } + +[features] +default = ["std"] +std = [ + "serde", + "codec/std", + "sp-std/std", + "sp-runtime/std", + "frame-support/std", + "frame-system/std", + "pallet-treasury/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] diff --git a/frame/bounties/README.md b/frame/bounties/README.md new file mode 100644 index 0000000000000..bf63fca5f34b2 --- /dev/null +++ b/frame/bounties/README.md @@ -0,0 +1,52 @@ +# Bounties Module ( pallet-bounties ) + +## Bounty + +**Note :: This pallet is tightly coupled with pallet-treasury** + +A Bounty Spending is a reward for a specified body of work - or specified set of objectives - that +needs to be executed for a predefined Treasury amount to be paid out. 
A curator is assigned after +the bounty is approved and funded by Council, to be delegated with the responsibility of assigning a +payout address once the specified set of objectives is completed. + +After the Council has activated a bounty, it delegates the work that requires expertise to a curator +in exchange of a deposit. Once the curator accepts the bounty, they get to close the active bounty. +Closing the active bounty enacts a delayed payout to the payout address, the curator fee and the +return of the curator deposit. The delay allows for intervention through regular democracy. The +Council gets to unassign the curator, resulting in a new curator election. The Council also gets to +cancel the bounty if deemed necessary before assigning a curator or once the bounty is active or +payout is pending, resulting in the slash of the curator's deposit. + +### Terminology + +- **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion by + the Treasury. +- **Proposer:** An account proposing a bounty spending. +- **Curator:** An account managing the bounty and assigning a payout address receiving the reward + for the completion of work. +- **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on + deposit per byte within the bounty description. +- **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The + deposit is returned when/if the bounty is completed. +- **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is + rewarded. +- **Payout address:** The account to which the total or part of the bounty is assigned to. +- **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before claiming. +- **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. + +## Interface + +### Dispatchable Functions + +Bounty protocol: +- `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of + tasks and stake the required deposit. +- `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of + work. +- `propose_curator` - Assign an account to a bounty as candidate curator. +- `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. +- `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. +- `award_bounty` - Close and pay out the specified amount for the completed work. +- `claim_bounty` - Claim a specific bounty amount from the Payout Address. +- `unassign_curator` - Unassign an accepted curator from a specific earmark. +- `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs new file mode 100644 index 0000000000000..f6fc11ad0bf06 --- /dev/null +++ b/frame/bounties/src/benchmarking.rs @@ -0,0 +1,245 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
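The dispatchables listed in the README above compose into one happy-path lifecycle. A hedged sketch of that flow as calls against a mock runtime (the `Bounties` and `Origin` names, the account variables, and the concrete values are illustrative; a real test would also advance blocks and run `on_initialize` between funding, curation, and claiming):

```rust
// Proposer stakes the deposit and puts up a bounty.
assert_ok!(Bounties::propose_bounty(Origin::signed(proposer), 50, b"fix the docs".to_vec()));
let bounty_id = Bounties::bounty_count() - 1;

// Council approves it; the treasury funds it at the next spend period.
assert_ok!(Bounties::approve_bounty(Origin::root(), bounty_id));

// Council proposes a curator with a fee; the curator accepts, reserving a deposit.
assert_ok!(Bounties::propose_curator(Origin::root(), bounty_id, curator, 5));
assert_ok!(Bounties::accept_curator(Origin::signed(curator), bounty_id));

// The curator awards the bounty; after the payout delay the beneficiary claims it.
assert_ok!(Bounties::award_bounty(Origin::signed(curator), bounty_id, beneficiary));
assert_ok!(Bounties::claim_bounty(Origin::signed(beneficiary), bounty_id));
```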
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! bounties pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use sp_runtime::traits::Bounded; +use frame_system::{EventRecord, RawOrigin}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_support::traits::OnInitialize; + +use crate::Module as Bounties; +use pallet_treasury::Module as Treasury; + +const SEED: u32 = 0; + +// Create bounties that are approved for use in `on_initialize`. +fn create_approved_bounties(n: u32) -> Result<(), &'static str> { + for i in 0 .. n { + let (caller, _curator, _fee, value, reason) = setup_bounty::(i, MAX_BYTES); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + } + ensure!(BountyApprovals::get().len() == n as usize, "Not all bounty approved"); + Ok(()) +} + +// Create the pre-requisite information needed to create a treasury `propose_bounty`. +fn setup_bounty(u: u32, d: u32) -> ( + T::AccountId, + T::AccountId, + BalanceOf, + BalanceOf, + Vec, +) { + let caller = account("caller", u, SEED); + let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100u32.into()); + let fee = value / 2u32.into(); + let deposit = T::BountyDepositBase::get() + T::DataDepositPerByte::get() * MAX_BYTES.into(); + let _ = T::Currency::make_free_balance_be(&caller, deposit); + let curator = account("curator", u, SEED); + let _ = T::Currency::make_free_balance_be(&curator, fee / 2u32.into()); + let reason = vec![0; d as usize]; + (caller, curator, fee, value, reason) +} + +fn create_bounty() -> Result<( + ::Source, + BountyIndex, +), &'static str> { + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + let curator_lookup = T::Lookup::unlookup(curator.clone()); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Treasury::::on_initialize(T::BlockNumber::zero()); + Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup.clone(), fee)?; + Bounties::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; + Ok((curator_lookup, bounty_id)) +} + +fn setup_pot_account() { + let pot_account = Bounties::::account_id(); + let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into()); + let _ = T::Currency::make_free_balance_be(&pot_account, value); +} + +fn assert_last_event(generic_event: ::Event) { + let events = frame_system::Module::::events(); + let system_event: ::Event = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +const MAX_BYTES: u32 = 16384; + +benchmarks! { + propose_bounty { + let d in 0 .. 
MAX_BYTES; + + let (caller, curator, fee, value, description) = setup_bounty::(0, d); + }: _(RawOrigin::Signed(caller), value, description) + + approve_bounty { + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + }: _(RawOrigin::Root, bounty_id) + + propose_curator { + setup_pot_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + let curator_lookup = T::Lookup::unlookup(curator.clone()); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Bounties::::on_initialize(T::BlockNumber::zero()); + }: _(RawOrigin::Root, bounty_id, curator_lookup, fee) + + // Worst case when curator is inactive and any sender unassigns the curator. + unassign_curator { + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + let bounty_id = BountyCount::get() - 1; + frame_system::Module::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), bounty_id) + + accept_curator { + setup_pot_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + let curator_lookup = T::Lookup::unlookup(curator.clone()); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Bounties::::on_initialize(T::BlockNumber::zero()); + Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; + }: _(RawOrigin::Signed(curator), bounty_id) + + award_bounty { + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::get() - 1; + let curator = T::Lookup::lookup(curator_lookup)?; + let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); + }: _(RawOrigin::Signed(curator), bounty_id, beneficiary) + + claim_bounty { + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::get() - 1; + let curator = T::Lookup::lookup(curator_lookup)?; + + let beneficiary_account: T::AccountId = account("beneficiary", 0, SEED); + let beneficiary = T::Lookup::unlookup(beneficiary_account.clone()); + Bounties::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; + + frame_system::Module::::set_block_number(T::BountyDepositPayoutDelay::get()); + ensure!(T::Currency::free_balance(&beneficiary_account).is_zero(), "Beneficiary already has balance"); + + }: _(RawOrigin::Signed(curator), bounty_id) + verify { + ensure!(!T::Currency::free_balance(&beneficiary_account).is_zero(), "Beneficiary didn't get paid"); + } + + close_bounty_proposed { + setup_pot_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + }: close_bounty(RawOrigin::Root, bounty_id) + + close_bounty_active { + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + 
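The `spend_funds` benchmark further below drives the `pallet_treasury::SpendFunds` hook, which is how this pallet receives treasury money at all. A hedged skeleton of that hook as this pallet would implement it (the trait bound and body comments are a sketch inferred from the benchmark's call, not the pallet's literal code):

```rust
impl<T: Config> pallet_treasury::SpendFunds<T> for Module<T> {
    fn spend_funds(
        budget_remaining: &mut BalanceOf<T>,
        imbalance: &mut PositiveImbalanceOf<T>,
        total_weight: &mut Weight,
        missed_any: &mut bool,
    ) {
        // Called by the treasury during its spend period: fund approved bounties out of
        // `budget_remaining`, accumulate the minted `imbalance`, add the weight consumed,
        // and set `missed_any` if the remaining budget could not cover every approval.
    }
}
```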
Bounties::::on_initialize(T::BlockNumber::zero()); + let bounty_id = BountyCount::get() - 1; + }: close_bounty(RawOrigin::Root, bounty_id) + verify { + assert_last_event::(RawEvent::BountyCanceled(bounty_id).into()) + } + + extend_bounty_expiry { + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::get() - 1; + let curator = T::Lookup::lookup(curator_lookup)?; + }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) + verify { + assert_last_event::(RawEvent::BountyExtended(bounty_id).into()) + } + + spend_funds { + let b in 1 .. 100; + setup_pot_account::(); + create_approved_bounties::(b)?; + + let mut budget_remaining = BalanceOf::::max_value(); + let mut imbalance = PositiveImbalanceOf::::zero(); + let mut total_weight = Weight::zero(); + let mut missed_any = false; + }: { + as pallet_treasury::SpendFunds>::spend_funds( + &mut budget_remaining, + &mut imbalance, + &mut total_weight, + &mut missed_any, + ); + } + verify { + ensure!(budget_remaining < BalanceOf::::max_value(), "Budget not used"); + ensure!(missed_any == false, "Missed some"); + assert_last_event::(RawEvent::BountyBecameActive(b - 1).into()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_propose_bounty::()); + assert_ok!(test_benchmark_approve_bounty::()); + assert_ok!(test_benchmark_propose_curator::()); + assert_ok!(test_benchmark_unassign_curator::()); + assert_ok!(test_benchmark_accept_curator::()); + assert_ok!(test_benchmark_award_bounty::()); + assert_ok!(test_benchmark_claim_bounty::()); + assert_ok!(test_benchmark_close_bounty_proposed::()); + assert_ok!(test_benchmark_close_bounty_active::()); + assert_ok!(test_benchmark_extend_bounty_expiry::()); + assert_ok!(test_benchmark_spend_funds::()); + }); + } +} diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs new file mode 100644 index 0000000000000..ba0d4a5b16cb4 --- /dev/null +++ b/frame/bounties/src/lib.rs @@ -0,0 +1,760 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Bounties Module ( pallet-bounties ) +//! +//! ## Bounty +//! +//! > NOTE: This pallet is tightly coupled with pallet-treasury. +//! +//! A Bounty Spending is a reward for a specified body of work - or specified set of objectives - +//! that needs to be executed for a predefined Treasury amount to be paid out. A curator is assigned +//! after the bounty is approved and funded by Council, to be delegated with the responsibility of +//! assigning a payout address once the specified set of objectives is completed. +//! +//! After the Council has activated a bounty, it delegates the work that requires expertise to a +//! 
curator in exchange of a deposit. Once the curator accepts the bounty, they get to close the +//! active bounty. Closing the active bounty enacts a delayed payout to the payout address, the +//! curator fee and the return of the curator deposit. The delay allows for intervention through +//! regular democracy. The Council gets to unassign the curator, resulting in a new curator +//! election. The Council also gets to cancel the bounty if deemed necessary before assigning a +//! curator or once the bounty is active or payout is pending, resulting in the slash of the +//! curator's deposit. +//! +//! +//! ### Terminology +//! +//! Bounty: +//! - **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion +//! by the Treasury. +//! - **Proposer:** An account proposing a bounty spending. +//! - **Curator:** An account managing the bounty and assigning a payout address receiving the +//! reward for the completion of work. +//! - **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on +//! deposit per byte within the bounty description. +//! - **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The +//! deposit is returned when/if the bounty is completed. +//! - **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is +//! rewarded. +//! - **Payout address:** The account to which the total or part of the bounty is assigned to. +//! - **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before +//! claiming. +//! - **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. +//! +//! ## Interface +//! +//! ### Dispatchable Functions +//! +//! Bounty protocol: +//! - `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of +//! tasks and stake the required deposit. +//! - `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of +//! work. +//! - `propose_curator` - Assign an account to a bounty as candidate curator. +//! - `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. +//! - `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. +//! - `award_bounty` - Close and pay out the specified amount for the completed work. +//! - `claim_bounty` - Claim a specific bounty amount from the Payout Address. +//! - `unassign_curator` - Unassign an accepted curator from a specific earmark. +//! - `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. 
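The `Config` trait defined just below requires the runtime to supply several constants. A hedged sketch of that wiring (the `Runtime` name and every concrete value are illustrative, assuming `Balance = u128` and `BlockNumber = u32`; the `SubstrateWeight` path assumes the generated weights module follows the same convention as pallet_balances earlier in this diff):

```rust
use frame_support::parameter_types;
use sp_runtime::Permill;

parameter_types! {
    pub const BountyDepositBase: u128 = 100;            // deposit to place a proposal
    pub const BountyDepositPayoutDelay: u32 = 3;        // blocks a beneficiary waits to claim
    pub const BountyUpdatePeriod: u32 = 20;             // curator must check in this often
    pub const BountyCuratorDeposit: Permill = Permill::from_percent(50);
    pub const BountyValueMinimum: u128 = 10;
    pub const DataDepositPerByte: u128 = 1;
    pub const MaximumReasonLength: u32 = 16384;
}

impl pallet_bounties::Config for Runtime {
    type BountyDepositBase = BountyDepositBase;
    type BountyDepositPayoutDelay = BountyDepositPayoutDelay;
    type BountyUpdatePeriod = BountyUpdatePeriod;
    type BountyCuratorDeposit = BountyCuratorDeposit;
    type BountyValueMinimum = BountyValueMinimum;
    type DataDepositPerByte = DataDepositPerByte;
    type Event = Event;
    type MaximumReasonLength = MaximumReasonLength;
    type WeightInfo = pallet_bounties::weights::SubstrateWeight<Runtime>;
}
```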
+ +#![cfg_attr(not(feature = "std"), no_std)] + +mod tests; +mod benchmarking; +pub mod weights; + +use sp_std::prelude::*; + +use frame_support::{decl_module, decl_storage, decl_event, ensure, decl_error}; + +use frame_support::traits::{ + Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::{AllowDeath}, + ReservableCurrency}; + +use sp_runtime::{Permill, RuntimeDebug, DispatchResult, traits::{ + Zero, StaticLookup, AccountIdConversion, Saturating, BadOrigin +}}; + +use frame_support::dispatch::DispatchResultWithPostInfo; +use frame_support::traits::{EnsureOrigin}; + +use frame_support::weights::{Weight}; + +use codec::{Encode, Decode}; +use frame_system::{self as system, ensure_signed}; +pub use weights::WeightInfo; + +type BalanceOf = pallet_treasury::BalanceOf; + +type PositiveImbalanceOf = pallet_treasury::PositiveImbalanceOf; + +pub trait Config: frame_system::Config + pallet_treasury::Config { + + /// The amount held on deposit for placing a bounty proposal. + type BountyDepositBase: Get>; + + /// The delay period for which a bounty beneficiary need to wait before claim the payout. + type BountyDepositPayoutDelay: Get; + + /// Bounty duration in blocks. + type BountyUpdatePeriod: Get; + + /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. + type BountyCuratorDeposit: Get; + + /// Minimum value for a bounty. + type BountyValueMinimum: Get>; + + /// The amount held on deposit per byte within the tip report reason or bounty description. + type DataDepositPerByte: Get>; + + /// The overarching event type. + type Event: From> + Into<::Event>; + + /// Maximum acceptable reason length. + type MaximumReasonLength: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; +} + +/// An index of a bounty. Just a `u32`. +pub type BountyIndex = u32; + +/// A bounty proposal. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct Bounty { + /// The account proposing it. + proposer: AccountId, + /// The (total) amount that should be paid if the bounty is rewarded. + value: Balance, + /// The curator fee. Included in value. + fee: Balance, + /// The deposit of curator. + curator_deposit: Balance, + /// The amount held on deposit (reserved) for making this proposal. + bond: Balance, + /// The status of this bounty. + status: BountyStatus, +} + +/// The status of a bounty proposal. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub enum BountyStatus { + /// The bounty is proposed and waiting for approval. + Proposed, + /// The bounty is approved and waiting to become active at next spend period. + Approved, + /// The bounty is funded and waiting for curator assignment. + Funded, + /// A curator has been proposed by the `ApproveOrigin`. Waiting for acceptance from the curator. + CuratorProposed { + /// The assigned curator of this bounty. + curator: AccountId, + }, + /// The bounty is active and waiting to be awarded. + Active { + /// The curator of this bounty. + curator: AccountId, + /// An update from the curator is due by this block, else they are considered inactive. + update_due: BlockNumber, + }, + /// The bounty is awarded and waiting to released after a delay. + PendingPayout { + /// The curator of this bounty. + curator: AccountId, + /// The beneficiary of the bounty. + beneficiary: AccountId, + /// When the bounty can be claimed. + unlock_at: BlockNumber, + }, +} + +// Note :: For backward compatibility reasons, +// pallet-bounties uses Treasury for storage. 
+// This is temporary solution, soon will get replaced with +// Own storage identifier. +decl_storage! { + trait Store for Module as Treasury { + + /// Number of bounty proposals that have been made. + pub BountyCount get(fn bounty_count): BountyIndex; + + /// Bounties that have been made. + pub Bounties get(fn bounties): + map hasher(twox_64_concat) BountyIndex + => Option, T::BlockNumber>>; + + /// The description of each bounty. + pub BountyDescriptions get(fn bounty_descriptions): map hasher(twox_64_concat) BountyIndex => Option>; + + /// Bounty indices that have been approved but not yet funded. + pub BountyApprovals get(fn bounty_approvals): Vec; + } +} + +decl_event!( + pub enum Event + where + Balance = BalanceOf, + ::AccountId, + { + /// New bounty proposal. \[index\] + BountyProposed(BountyIndex), + /// A bounty proposal was rejected; funds were slashed. \[index, bond\] + BountyRejected(BountyIndex, Balance), + /// A bounty proposal is funded and became active. \[index\] + BountyBecameActive(BountyIndex), + /// A bounty is awarded to a beneficiary. \[index, beneficiary\] + BountyAwarded(BountyIndex, AccountId), + /// A bounty is claimed by beneficiary. \[index, payout, beneficiary\] + BountyClaimed(BountyIndex, Balance, AccountId), + /// A bounty is cancelled. \[index\] + BountyCanceled(BountyIndex), + /// A bounty expiry is extended. \[index\] + BountyExtended(BountyIndex), + } +); + +decl_error! { + /// Error for the treasury module. + pub enum Error for Module { + /// Proposer's balance is too low. + InsufficientProposersBalance, + /// No proposal or bounty at that index. + InvalidIndex, + /// The reason given is just too big. + ReasonTooBig, + /// The bounty status is unexpected. + UnexpectedStatus, + /// Require bounty curator. + RequireCurator, + /// Invalid bounty value. + InvalidValue, + /// Invalid bounty fee. + InvalidFee, + /// A bounty payout is pending. + /// To cancel the bounty, you must unassign and slash the curator. + PendingPayout, + /// The bounties cannot be claimed/closed because it's still in the countdown period. + Premature, + } +} + +decl_module! { + pub struct Module + for enum Call + where origin: T::Origin + { + /// The amount held on deposit per byte within bounty description. + const DataDepositPerByte: BalanceOf = T::DataDepositPerByte::get(); + + /// The amount held on deposit for placing a bounty proposal. + const BountyDepositBase: BalanceOf = T::BountyDepositBase::get(); + + /// The delay period for which a bounty beneficiary need to wait before claim the payout. + const BountyDepositPayoutDelay: T::BlockNumber = T::BountyDepositPayoutDelay::get(); + + /// Bounty duration in blocks. + const BountyUpdatePeriod: T::BlockNumber = T::BountyUpdatePeriod::get(); + + /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. + const BountyCuratorDeposit: Permill = T::BountyCuratorDeposit::get(); + + /// Minimum value for a bounty. + const BountyValueMinimum: BalanceOf = T::BountyValueMinimum::get(); + + /// Maximum acceptable reason length. + const MaximumReasonLength: u32 = T::MaximumReasonLength::get(); + + type Error = Error; + + fn deposit_event() = default; + + /// Propose a new bounty. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as + /// `DataDepositPerByte` for each byte in `reason`. It will be unreserved upon approval, + /// or slashed when rejected. 
+ /// + /// - `curator`: The curator account whom will manage this bounty. + /// - `fee`: The curator fee. + /// - `value`: The total payment amount of this bounty, curator fee included. + /// - `description`: The description of this bounty. + #[weight = ::WeightInfo::propose_bounty(description.len() as u32)] + fn propose_bounty( + origin, + #[compact] value: BalanceOf, + description: Vec, + ) { + let proposer = ensure_signed(origin)?; + Self::create_bounty(proposer, description, value)?; + } + + /// Approve a bounty proposal. At a later time, the bounty will be funded and become active + /// and the original deposit will be returned. + /// + /// May only be called from `T::ApproveOrigin`. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::approve_bounty()] + fn approve_bounty(origin, #[compact] bounty_id: BountyIndex) { + T::ApproveOrigin::ensure_origin(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + ensure!(bounty.status == BountyStatus::Proposed, Error::::UnexpectedStatus); + + bounty.status = BountyStatus::Approved; + + BountyApprovals::append(bounty_id); + + Ok(()) + })?; + } + + /// Assign a curator to a funded bounty. + /// + /// May only be called from `T::ApproveOrigin`. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::propose_curator()] + fn propose_curator( + origin, + #[compact] bounty_id: BountyIndex, + curator: ::Source, + #[compact] fee: BalanceOf, + ) { + T::ApproveOrigin::ensure_origin(origin)?; + + let curator = T::Lookup::lookup(curator)?; + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + match bounty.status { + BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => {}, + _ => return Err(Error::::UnexpectedStatus.into()), + }; + + ensure!(fee < bounty.value, Error::::InvalidFee); + + bounty.status = BountyStatus::CuratorProposed { curator }; + bounty.fee = fee; + + Ok(()) + })?; + } + + /// Unassign curator from a bounty. + /// + /// This function can only be called by the `RejectOrigin` a signed origin. + /// + /// If this function is called by the `RejectOrigin`, we assume that the curator is malicious + /// or inactive. As a result, we will slash the curator when possible. + /// + /// If the origin is the curator, we take this as a sign they are unable to do their job and + /// they willingly give up. We could slash them, but for now we allow them to recover their + /// deposit and exit without issue. (We may want to change this if it is abused.) + /// + /// Finally, the origin can be anyone if and only if the curator is "inactive". This allows + /// anyone in the community to call out that a curator is not doing their due diligence, and + /// we should pick a new curator. In this case the curator should also be slashed. + /// + /// # + /// - O(1). 
+ /// # + #[weight = ::WeightInfo::unassign_curator()] + fn unassign_curator( + origin, + #[compact] bounty_id: BountyIndex, + ) { + let maybe_sender = ensure_signed(origin.clone()) + .map(Some) + .or_else(|_| T::RejectOrigin::ensure_origin(origin).map(|_| None))?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + let slash_curator = |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { + let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; + T::OnSlash::on_unbalanced(imbalance); + *curator_deposit = Zero::zero(); + }; + + match bounty.status { + BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { + // No curator to unassign at this point. + return Err(Error::::UnexpectedStatus.into()) + } + BountyStatus::CuratorProposed { ref curator } => { + // A curator has been proposed, but not accepted yet. + // Either `RejectOrigin` or the proposed curator can unassign the curator. + ensure!(maybe_sender.map_or(true, |sender| sender == *curator), BadOrigin); + }, + BountyStatus::Active { ref curator, ref update_due } => { + // The bounty is active. + match maybe_sender { + // If the `RejectOrigin` is calling this function, slash the curator. + None => { + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + }, + Some(sender) => { + // If the sender is not the curator, and the curator is inactive, + // slash the curator. + if sender != *curator { + let block_number = system::Module::::block_number(); + if *update_due < block_number { + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + } else { + // Curator has more time to give an update. + return Err(Error::::Premature.into()) + } + } else { + // Else this is the curator, willingly giving up their role. + // Give back their deposit. + let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); + // Continue to change bounty status below... + } + }, + } + }, + BountyStatus::PendingPayout { ref curator, .. } => { + // The bounty is pending payout, so only council can unassign a curator. + // By doing so, they are claiming the curator is acting maliciously, so + // we slash the curator. + ensure!(maybe_sender.is_none(), BadOrigin); + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + } + }; + + bounty.status = BountyStatus::Funded; + Ok(()) + })?; + } + + /// Accept the curator role for a bounty. + /// A deposit will be reserved from curator and refund upon successful payout. + /// + /// May only be called from the curator. + /// + /// # + /// - O(1). 
+ /// # + #[weight = ::WeightInfo::accept_curator()] + fn accept_curator(origin, #[compact] bounty_id: BountyIndex) { + let signer = ensure_signed(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + match bounty.status { + BountyStatus::CuratorProposed { ref curator } => { + ensure!(signer == *curator, Error::::RequireCurator); + + let deposit = T::BountyCuratorDeposit::get() * bounty.fee; + T::Currency::reserve(curator, deposit)?; + bounty.curator_deposit = deposit; + + let update_due = system::Module::::block_number() + T::BountyUpdatePeriod::get(); + bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; + + Ok(()) + }, + _ => Err(Error::::UnexpectedStatus.into()), + } + })?; + } + + /// Award bounty to a beneficiary account. The beneficiary will be able to claim the funds after a delay. + /// + /// The dispatch origin for this call must be the curator of this bounty. + /// + /// - `bounty_id`: Bounty ID to award. + /// - `beneficiary`: The beneficiary account whom will receive the payout. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::award_bounty()] + fn award_bounty(origin, #[compact] bounty_id: BountyIndex, beneficiary: ::Source) { + let signer = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + match &bounty.status { + BountyStatus::Active { + curator, + .. + } => { + ensure!(signer == *curator, Error::::RequireCurator); + }, + _ => return Err(Error::::UnexpectedStatus.into()), + } + bounty.status = BountyStatus::PendingPayout { + curator: signer, + beneficiary: beneficiary.clone(), + unlock_at: system::Module::::block_number() + T::BountyDepositPayoutDelay::get(), + }; + + Ok(()) + })?; + + Self::deposit_event(Event::::BountyAwarded(bounty_id, beneficiary)); + } + + /// Claim the payout from an awarded bounty after payout delay. + /// + /// The dispatch origin for this call must be the beneficiary of this bounty. + /// + /// - `bounty_id`: Bounty ID to claim. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::claim_bounty()] + fn claim_bounty(origin, #[compact] bounty_id: BountyIndex) { + let _ = ensure_signed(origin)?; // anyone can trigger claim + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let bounty = maybe_bounty.take().ok_or(Error::::InvalidIndex)?; + if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } = bounty.status { + ensure!(system::Module::::block_number() >= unlock_at, Error::::Premature); + let bounty_account = Self::bounty_account_id(bounty_id); + let balance = T::Currency::free_balance(&bounty_account); + let fee = bounty.fee.min(balance); // just to be safe + let payout = balance.saturating_sub(fee); + let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); + let _ = T::Currency::transfer(&bounty_account, &curator, fee, AllowDeath); // should not fail + let _ = T::Currency::transfer(&bounty_account, &beneficiary, payout, AllowDeath); // should not fail + *maybe_bounty = None; + + BountyDescriptions::remove(bounty_id); + + Self::deposit_event(Event::::BountyClaimed(bounty_id, payout, beneficiary)); + Ok(()) + } else { + Err(Error::::UnexpectedStatus.into()) + } + })?; + } + + /// Cancel a proposed or active bounty. 
All the funds will be sent to treasury and + /// the curator deposit will be unreserved if possible. + /// + /// Only `T::RejectOrigin` is able to cancel a bounty. + /// + /// - `bounty_id`: Bounty ID to cancel. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::close_bounty_proposed().max(::WeightInfo::close_bounty_active())] + fn close_bounty(origin, #[compact] bounty_id: BountyIndex) -> DispatchResultWithPostInfo { + T::RejectOrigin::ensure_origin(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResultWithPostInfo { + let bounty = maybe_bounty.as_ref().ok_or(Error::::InvalidIndex)?; + + match &bounty.status { + BountyStatus::Proposed => { + // The reject origin would like to cancel a proposed bounty. + BountyDescriptions::remove(bounty_id); + let value = bounty.bond; + let imbalance = T::Currency::slash_reserved(&bounty.proposer, value).0; + T::OnSlash::on_unbalanced(imbalance); + *maybe_bounty = None; + + Self::deposit_event(Event::::BountyRejected(bounty_id, value)); + // Return early, nothing else to do. + return Ok(Some(::WeightInfo::close_bounty_proposed()).into()) + }, + BountyStatus::Approved => { + // For weight reasons, we don't allow a council to cancel in this phase. + // We ask for them to wait until it is funded before they can cancel. + return Err(Error::::UnexpectedStatus.into()) + }, + BountyStatus::Funded | + BountyStatus::CuratorProposed { .. } => { + // Nothing extra to do besides the removal of the bounty below. + }, + BountyStatus::Active { curator, .. } => { + // Cancelled by council, refund deposit of the working curator. + let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); + // Then execute removal of the bounty below. + }, + BountyStatus::PendingPayout { .. } => { + // Bounty is already pending payout. If council wants to cancel + // this bounty, it should mean the curator was acting maliciously. + // So the council should first unassign the curator, slashing their + // deposit. + return Err(Error::::PendingPayout.into()) + } + } + + let bounty_account = Self::bounty_account_id(bounty_id); + + BountyDescriptions::remove(bounty_id); + + let balance = T::Currency::free_balance(&bounty_account); + let _ = T::Currency::transfer(&bounty_account, &Self::account_id(), balance, AllowDeath); // should not fail + *maybe_bounty = None; + + Self::deposit_event(Event::::BountyCanceled(bounty_id)); + Ok(Some(::WeightInfo::close_bounty_active()).into()) + }) + } + + /// Extend the expiry time of an active bounty. + /// + /// The dispatch origin for this call must be the curator of this bounty. + /// + /// - `bounty_id`: Bounty ID to extend. + /// - `remark`: additional information. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::extend_bounty_expiry()] + fn extend_bounty_expiry(origin, #[compact] bounty_id: BountyIndex, _remark: Vec) { + let signer = ensure_signed(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + match bounty.status { + BountyStatus::Active { ref curator, ref mut update_due } => { + ensure!(*curator == signer, Error::::RequireCurator); + *update_due = (system::Module::::block_number() + T::BountyUpdatePeriod::get()).max(*update_due); + }, + _ => return Err(Error::::UnexpectedStatus.into()), + } + + Ok(()) + })?; + + Self::deposit_event(Event::::BountyExtended(bounty_id)); + } + } +} + +impl Module { + // Add public immutables and private mutables. 
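	// Quick reference for the dispatchables above (derived from the `BountyStatus`
	// match arms in each call; this adds no new logic):
	//
	//   propose_bounty     creates the bounty as `Proposed`
	//   approve_bounty     `Proposed` -> `Approved`
	//   spend_funds        `Approved` -> `Funded` (at the treasury spend period)
	//   propose_curator    `Proposed` | `Approved` | `Funded` -> `CuratorProposed`
	//   accept_curator     `CuratorProposed` -> `Active`
	//   award_bounty       `Active` -> `PendingPayout`
	//   claim_bounty       `PendingPayout` -> bounty removed, payout released
	//   unassign_curator   `CuratorProposed` | `Active` | `PendingPayout` -> `Funded`
	//   close_bounty       removes the bounty; rejected while `Approved` or `PendingPayout`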
+ + /// The account ID of the treasury pot. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn account_id() -> T::AccountId { + T::ModuleId::get().into_account() + } + + /// The account ID of a bounty account + pub fn bounty_account_id(id: BountyIndex) -> T::AccountId { + // only use two byte prefix to support 16 byte account id (used by test) + // "modl" ++ "py/trsry" ++ "bt" is 14 bytes, and two bytes remaining for bounty index + T::ModuleId::get().into_sub_account(("bt", id)) + } + + fn create_bounty( + proposer: T::AccountId, + description: Vec, + value: BalanceOf, + ) -> DispatchResult { + ensure!(description.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); + ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); + + let index = Self::bounty_count(); + + // reserve deposit for new bounty + let bond = T::BountyDepositBase::get() + + T::DataDepositPerByte::get() * (description.len() as u32).into(); + T::Currency::reserve(&proposer, bond) + .map_err(|_| Error::::InsufficientProposersBalance)?; + + BountyCount::put(index + 1); + + let bounty = Bounty { + proposer, + value, + fee: 0u32.into(), + curator_deposit: 0u32.into(), + bond, + status: BountyStatus::Proposed, + }; + + Bounties::::insert(index, &bounty); + BountyDescriptions::insert(index, description); + + Self::deposit_event(RawEvent::BountyProposed(index)); + + Ok(()) + } + +} + +impl pallet_treasury::SpendFunds for Module { + fn spend_funds( + budget_remaining: &mut BalanceOf, + imbalance: &mut PositiveImbalanceOf, + total_weight: &mut Weight, + missed_any: &mut bool + ) { + let bounties_len = BountyApprovals::mutate(|v| { + let bounties_approval_len = v.len() as u32; + v.retain(|&index| { + Bounties::::mutate(index, |bounty| { + // Should always be true, but shouldn't panic if false or we're screwed. + if let Some(bounty) = bounty { + if bounty.value <= *budget_remaining { + *budget_remaining -= bounty.value; + + bounty.status = BountyStatus::Funded; + + // return their deposit. + let _ = T::Currency::unreserve(&bounty.proposer, bounty.bond); + + // fund the bounty account + imbalance.subsume(T::Currency::deposit_creating(&Self::bounty_account_id(index), bounty.value)); + + Self::deposit_event(RawEvent::BountyBecameActive(index)); + false + } else { + *missed_any = true; + true + } + } else { + false + } + }) + }); + bounties_approval_len + }); + + *total_weight += ::WeightInfo::spend_funds(bounties_len); + } +} diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs new file mode 100644 index 0000000000000..cbff502daa65e --- /dev/null +++ b/frame/bounties/src/tests.rs @@ -0,0 +1,900 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! bounties pallet tests. 
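// The literal amounts asserted in these tests follow from the mock constants
// configured further down in this file and the formulas in the pallet:
//
//   bond            = BountyDepositBase + DataDepositPerByte * description.len()
//                     e.g. 80 + 1 * 5 = 85 for a 5-byte description
//   curator_deposit = BountyCuratorDeposit * fee, e.g. 50% of a fee of 4 = 2
//   update_due      = block of acceptance + BountyUpdatePeriod, e.g. 2 + 20 = 22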
+ +#![cfg(test)] + +use crate as pallet_bounties; +use super::*; +use std::cell::RefCell; + +use frame_support::{ + assert_noop, assert_ok, parameter_types, weights::Weight, traits::OnInitialize +}; + +use sp_core::H256; +use sp_runtime::{ + Perbill, ModuleId, + testing::Header, + traits::{BlakeTwo256, IdentityLookup, BadOrigin}, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Bounties: pallet_bounties::{Module, Call, Storage, Event}, + Treasury: pallet_treasury::{Module, Call, Storage, Config, Event}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} + +impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); +} +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Config for Test { + type MaxLocks = (); + type Balance = u64; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} +thread_local! { + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); +} +parameter_types! { + pub const ProposalBond: Permill = Permill::from_percent(5); + pub const ProposalBondMinimum: u64 = 1; + pub const SpendPeriod: u64 = 2; + pub const Burn: Permill = Permill::from_percent(50); + pub const DataDepositPerByte: u64 = 1; + pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); +} +// impl pallet_treasury::Config for Test { +impl pallet_treasury::Config for Test { + type ModuleId = TreasuryModuleId; + type Currency = pallet_balances::Module; + type ApproveOrigin = frame_system::EnsureRoot; + type RejectOrigin = frame_system::EnsureRoot; + type Event = Event; + type OnSlash = (); + type ProposalBond = ProposalBond; + type ProposalBondMinimum = ProposalBondMinimum; + type SpendPeriod = SpendPeriod; + type Burn = Burn; + type BurnDestination = (); // Just gets burned. + type WeightInfo = (); + type SpendFunds = Bounties; +} +parameter_types! 
{ + pub const BountyDepositBase: u64 = 80; + pub const BountyDepositPayoutDelay: u64 = 3; + pub const BountyUpdatePeriod: u32 = 20; + pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); + pub const BountyValueMinimum: u64 = 1; + pub const MaximumReasonLength: u32 = 16384; +} +impl Config for Test { + type Event = Event; + type BountyDepositBase = BountyDepositBase; + type BountyDepositPayoutDelay = BountyDepositPayoutDelay; + type BountyUpdatePeriod = BountyUpdatePeriod; + type BountyCuratorDeposit = BountyCuratorDeposit; + type BountyValueMinimum = BountyValueMinimum; + type DataDepositPerByte = DataDepositPerByte; + type MaximumReasonLength = MaximumReasonLength; + type WeightInfo = (); +} + +type TreasuryError = pallet_treasury::Error::; + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig::{ + // Total issuance will be 200 with treasury account initialized at ED. + balances: vec![(0, 100), (1, 98), (2, 1)], + }.assimilate_storage(&mut t).unwrap(); + pallet_treasury::GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + t.into() +} + +fn last_event() -> RawEvent { + System::events().into_iter().map(|r| r.event) + .filter_map(|e| { + if let Event::pallet_bounties(inner) = e { Some(inner) } else { None } + }) + .last() + .unwrap() +} + +#[test] +fn genesis_config_works() { + new_test_ext().execute_with(|| { + assert_eq!(Treasury::pot(), 0); + assert_eq!(Treasury::proposal_count(), 0); + }); +} + +#[test] +fn minting_works() { + new_test_ext().execute_with(|| { + // Check that accumulate works when we have Some value in Dummy already. + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + }); +} + +#[test] +fn spend_proposal_takes_min_deposit() { + new_test_ext().execute_with(|| { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_eq!(Balances::free_balance(0), 99); + assert_eq!(Balances::reserved_balance(0), 1); + }); +} + +#[test] +fn spend_proposal_takes_proportional_deposit() { + new_test_ext().execute_with(|| { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_eq!(Balances::free_balance(0), 95); + assert_eq!(Balances::reserved_balance(0), 5); + }); +} + +#[test] +fn spend_proposal_fails_when_proposer_poor() { + new_test_ext().execute_with(|| { + assert_noop!( + Treasury::propose_spend(Origin::signed(2), 100, 3), + TreasuryError::InsufficientProposersBalance, + ); + }); +} + +#[test] +fn accepted_spend_proposal_ignored_outside_spend_period() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + + >::on_initialize(1); + assert_eq!(Balances::free_balance(3), 0); + assert_eq!(Treasury::pot(), 100); + }); +} + +#[test] +fn unused_pot_should_diminish() { + new_test_ext().execute_with(|| { + let init_total_issuance = Balances::total_issuance(); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Balances::total_issuance(), init_total_issuance + 100); + + >::on_initialize(2); + assert_eq!(Treasury::pot(), 50); + assert_eq!(Balances::total_issuance(), init_total_issuance + 50); + }); +} + +#[test] +fn rejected_spend_proposal_ignored_on_spend_period() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + 
assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + + >::on_initialize(2); + assert_eq!(Balances::free_balance(3), 0); + assert_eq!(Treasury::pot(), 50); + }); +} + +#[test] +fn reject_already_rejected_spend_proposal_fails() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + assert_noop!(Treasury::reject_proposal(Origin::root(), 0), TreasuryError::InvalidIndex); + }); +} + +#[test] +fn reject_non_existent_spend_proposal_fails() { + new_test_ext().execute_with(|| { + assert_noop!(Treasury::reject_proposal(Origin::root(), 0), + pallet_treasury::Error::::InvalidIndex); + }); +} + +#[test] +fn accept_non_existent_spend_proposal_fails() { + new_test_ext().execute_with(|| { + assert_noop!(Treasury::approve_proposal(Origin::root(), 0), TreasuryError::InvalidIndex); + }); +} + +#[test] +fn accept_already_rejected_spend_proposal_fails() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + assert_noop!(Treasury::approve_proposal(Origin::root(), 0), TreasuryError::InvalidIndex); + }); +} + +#[test] +fn accepted_spend_proposal_enacted_on_spend_period() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + + >::on_initialize(2); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Treasury::pot(), 0); + }); +} + +#[test] +fn pot_underflow_should_not_diminish() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + + >::on_initialize(2); + assert_eq!(Treasury::pot(), 100); // Pot hasn't changed + + let _ = Balances::deposit_into_existing(&Treasury::account_id(), 100).unwrap(); + >::on_initialize(4); + assert_eq!(Balances::free_balance(3), 150); // Fund has been spent + assert_eq!(Treasury::pot(), 25); // Pot has finally changed + }); +} + +// Treasury account doesn't get deleted if amount approved to spend is all its free balance. +// i.e. pot should not include existential deposit needed for account survival. 
+#[test] +fn treasury_account_doesnt_get_deleted() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + let treasury_balance = Balances::free_balance(&Treasury::account_id()); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), treasury_balance, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + + >::on_initialize(2); + assert_eq!(Treasury::pot(), 100); // Pot hasn't changed + + assert_ok!(Treasury::propose_spend(Origin::signed(0), Treasury::pot(), 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 1)); + + >::on_initialize(4); + assert_eq!(Treasury::pot(), 0); // Pot is emptied + assert_eq!(Balances::free_balance(Treasury::account_id()), 1); // but the account is still there + }); +} + +// In case treasury account is not existing then it works fine. +// This is useful for chain that will just update runtime. +#[test] +fn inexistent_account_works() { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig::{ + balances: vec![(0, 100), (1, 99), (2, 1)], + }.assimilate_storage(&mut t).unwrap(); + // Treasury genesis config is not build thus treasury account does not exist + let mut t: sp_io::TestExternalities = t.into(); + + t.execute_with(|| { + assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist + assert_eq!(Treasury::pot(), 0); // Pot is empty + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 1)); + >::on_initialize(2); + assert_eq!(Treasury::pot(), 0); // Pot hasn't changed + assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed + + Balances::make_free_balance_be(&Treasury::account_id(), 100); + assert_eq!(Treasury::pot(), 99); // Pot now contains funds + assert_eq!(Balances::free_balance(Treasury::account_id()), 100); // Account does exist + + >::on_initialize(4); + + assert_eq!(Treasury::pot(), 0); // Pot has changed + assert_eq!(Balances::free_balance(3), 99); // Balance of `3` has changed + }); +} + +#[test] +fn propose_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec())); + + assert_eq!(last_event(), RawEvent::BountyProposed(0)); + + let deposit: u64 = 85 + 5; + assert_eq!(Balances::reserved_balance(0), deposit); + assert_eq!(Balances::free_balance(0), 100 - deposit); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 10, + bond: deposit, + status: BountyStatus::Proposed, + }); + + assert_eq!(Bounties::bounty_descriptions(0).unwrap(), b"1234567890".to_vec()); + + assert_eq!(Bounties::bounty_count(), 1); + }); +} + +#[test] +fn propose_bounty_validation_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_noop!( + Bounties::propose_bounty(Origin::signed(1), 0, [0; 17_000].to_vec()), + Error::::ReasonTooBig + ); + + assert_noop!( + Bounties::propose_bounty(Origin::signed(1), 10, b"12345678901234567890".to_vec()), + 
Error::::InsufficientProposersBalance + ); + + assert_noop!( + Bounties::propose_bounty(Origin::signed(1), 0, b"12345678901234567890".to_vec()), + Error::::InvalidValue + ); + }); +} + +#[test] +fn close_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::InvalidIndex); + + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"12345".to_vec())); + + assert_ok!(Bounties::close_bounty(Origin::root(), 0)); + + let deposit: u64 = 80 + 5; + + assert_eq!(last_event(), RawEvent::BountyRejected(0, deposit)); + + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 100 - deposit); + + assert_eq!(Bounties::bounties(0), None); + assert!(!pallet_treasury::Proposals::::contains_key(0)); + + assert_eq!(Bounties::bounty_descriptions(0), None); + }); +} + +#[test] +fn approve_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_noop!(Bounties::approve_bounty(Origin::root(), 0), Error::::InvalidIndex); + + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + let deposit: u64 = 80 + 5; + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + value: 50, + curator_deposit: 0, + bond: deposit, + status: BountyStatus::Approved, + }); + assert_eq!(Bounties::bounty_approvals(), vec![0]); + + assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::UnexpectedStatus); + + // deposit not returned yet + assert_eq!(Balances::reserved_balance(0), deposit); + assert_eq!(Balances::free_balance(0), 100 - deposit); + + >::on_initialize(2); + + // return deposit + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 100); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 50, + bond: deposit, + status: BountyStatus::Funded, + }); + + assert_eq!(Treasury::pot(), 100 - 50 - 25); // burn 25 + assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 50); + }); +} + +#[test] +fn assign_curator_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_noop!(Bounties::propose_curator(Origin::root(), 0, 4, 4), Error::::InvalidIndex); + + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_noop!(Bounties::propose_curator(Origin::root(), 0, 4, 50), Error::::InvalidFee); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::CuratorProposed { + curator: 4, + }, + }); + + assert_noop!(Bounties::accept_curator(Origin::signed(1), 0), Error::::RequireCurator); + assert_noop!(Bounties::accept_curator(Origin::signed(4), 0), pallet_balances::Error::::InsufficientBalance); + + Balances::make_free_balance_be(&4, 10); + + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 2, + value: 50, + bond: 85, + 
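			// curator_deposit of 2 is BountyCuratorDeposit (50%) of the 4 unit fee;
			// update_due below is the acceptance block (2) plus BountyUpdatePeriod (20).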
status: BountyStatus::Active { + curator: 4, + update_due: 22, + }, + }); + + assert_eq!(Balances::free_balance(&4), 8); + assert_eq!(Balances::reserved_balance(&4), 2); + }); +} + +#[test] +fn unassign_curator_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4)); + + assert_noop!(Bounties::unassign_curator(Origin::signed(1), 0), BadOrigin); + + assert_ok!(Bounties::unassign_curator(Origin::signed(4), 0)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4)); + + Balances::make_free_balance_be(&4, 10); + + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + + assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + assert_eq!(Balances::free_balance(&4), 8); + assert_eq!(Balances::reserved_balance(&4), 0); // slashed 2 + }); +} + + +#[test] +fn award_and_claim_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + Balances::make_free_balance_be(&4, 10); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + + assert_eq!(Balances::free_balance(4), 8); // inital 10 - 2 deposit + + assert_noop!(Bounties::award_bounty(Origin::signed(1), 0, 3), Error::::RequireCurator); + + assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 2, + value: 50, + bond: 85, + status: BountyStatus::PendingPayout { + curator: 4, + beneficiary: 3, + unlock_at: 5 + }, + }); + + assert_noop!(Bounties::claim_bounty(Origin::signed(1), 0), Error::::Premature); + + System::set_block_number(5); + >::on_initialize(5); + + assert_ok!(Balances::transfer(Origin::signed(0), Bounties::bounty_account_id(0), 10)); + + assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); + + assert_eq!(last_event(), RawEvent::BountyClaimed(0, 56, 3)); + + assert_eq!(Balances::free_balance(4), 14); // initial 10 + fee 4 + + assert_eq!(Balances::free_balance(3), 56); + assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0); + + assert_eq!(Bounties::bounties(0), None); + assert_eq!(Bounties::bounty_descriptions(0), None); + }); +} + +#[test] +fn claim_handles_high_fee() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + Balances::make_free_balance_be(&4, 30); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + 
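		// The fee of 49 is deliberately close to the bounty value of 50: after 10 is
		// slashed from the bounty account below, the remaining balance (40) is less
		// than the fee, so `claim_bounty` caps the fee at the balance and the
		// beneficiary receives a payout of zero.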
assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 49)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + + assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3)); + + System::set_block_number(5); + >::on_initialize(5); + + // make fee > balance + let _ = Balances::slash(&Bounties::bounty_account_id(0), 10); + + assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); + + assert_eq!(last_event(), RawEvent::BountyClaimed(0, 0, 3)); + + assert_eq!(Balances::free_balance(4), 70); // 30 + 50 - 10 + assert_eq!(Balances::free_balance(3), 0); + assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0); + + assert_eq!(Bounties::bounties(0), None); + assert_eq!(Bounties::bounty_descriptions(0), None); + }); +} + +#[test] +fn cancel_and_refund() { + new_test_ext().execute_with(|| { + + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Balances::transfer(Origin::signed(0), Bounties::bounty_account_id(0), 10)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 60); + + assert_noop!(Bounties::close_bounty(Origin::signed(0), 0), BadOrigin); + + assert_ok!(Bounties::close_bounty(Origin::root(), 0)); + + assert_eq!(Treasury::pot(), 85); // - 25 + 10 + + }); + +} + +#[test] +fn award_and_cancel() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 0, 10)); + assert_ok!(Bounties::accept_curator(Origin::signed(0), 0)); + + assert_eq!(Balances::free_balance(0), 95); + assert_eq!(Balances::reserved_balance(0), 5); + + assert_ok!(Bounties::award_bounty(Origin::signed(0), 0, 3)); + + // Cannot close bounty directly when payout is happening... + assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::PendingPayout); + + // Instead unassign the curator to slash them and then close. + assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); + assert_ok!(Bounties::close_bounty(Origin::root(), 0)); + + assert_eq!(last_event(), RawEvent::BountyCanceled(0)); + + assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0); + + // Slashed. 
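		// The curator's 5 unit deposit (50% of the 10 unit fee) was reserved from
		// account 0 on `accept_curator` and slashed by the root `unassign_curator`
		// above, leaving only the free balance of 95.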
+ assert_eq!(Balances::free_balance(0), 95); + assert_eq!(Balances::reserved_balance(0), 0); + + assert_eq!(Bounties::bounties(0), None); + assert_eq!(Bounties::bounty_descriptions(0), None); + }); +} + +#[test] +fn expire_and_unassign() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 1, 10)); + assert_ok!(Bounties::accept_curator(Origin::signed(1), 0)); + + assert_eq!(Balances::free_balance(1), 93); + assert_eq!(Balances::reserved_balance(1), 5); + + System::set_block_number(22); + >::on_initialize(22); + + assert_noop!(Bounties::unassign_curator(Origin::signed(0), 0), Error::::Premature); + + System::set_block_number(23); + >::on_initialize(23); + + assert_ok!(Bounties::unassign_curator(Origin::signed(0), 0)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 10, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + assert_eq!(Balances::free_balance(1), 93); + assert_eq!(Balances::reserved_balance(1), 0); // slashed + + }); +} + +#[test] +fn extend_expiry() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + Balances::make_free_balance_be(&4, 10); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + assert_noop!(Bounties::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()), Error::::UnexpectedStatus); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 10)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + + assert_eq!(Balances::free_balance(4), 5); + assert_eq!(Balances::reserved_balance(4), 5); + + System::set_block_number(10); + >::on_initialize(10); + + assert_noop!(Bounties::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()), Error::::RequireCurator); + assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 10, + curator_deposit: 5, + value: 50, + bond: 85, + status: BountyStatus::Active { curator: 4, update_due: 30 }, + }); + + assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 10, + curator_deposit: 5, + value: 50, + bond: 85, + status: BountyStatus::Active { curator: 4, update_due: 30 }, // still the same + }); + + System::set_block_number(25); + >::on_initialize(25); + + assert_noop!(Bounties::unassign_curator(Origin::signed(0), 0), Error::::Premature); + assert_ok!(Bounties::unassign_curator(Origin::signed(4), 0)); + + assert_eq!(Balances::free_balance(4), 10); // not slashed + assert_eq!(Balances::reserved_balance(4), 0); + }); +} + +#[test] +fn genesis_funding_works() { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let initial_funding = 100; + pallet_balances::GenesisConfig::{ + // Total issuance will be 200 with treasury account initialized with 100. 
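			// One unit stays locked as the treasury account's existential deposit,
			// which is why `pot()` below reports `initial_funding - minimum_balance()`.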
+ balances: vec![(0, 100), (Treasury::account_id(), initial_funding)], + }.assimilate_storage(&mut t).unwrap(); + pallet_treasury::GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + let mut t: sp_io::TestExternalities = t.into(); + + t.execute_with(|| { + assert_eq!(Balances::free_balance(Treasury::account_id()), initial_funding); + assert_eq!(Treasury::pot(), initial_funding - Balances::minimum_balance()); + }); +} diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs new file mode 100644 index 0000000000000..fcbee727abe5a --- /dev/null +++ b/frame/bounties/src/weights.rs @@ -0,0 +1,189 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_bounties +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 +//! DATE: 2020-12-16, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// ./target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_bounties +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/bounties/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_bounties. +pub trait WeightInfo { + fn propose_bounty(d: u32, ) -> Weight; + fn approve_bounty() -> Weight; + fn propose_curator() -> Weight; + fn unassign_curator() -> Weight; + fn accept_curator() -> Weight; + fn award_bounty() -> Weight; + fn claim_bounty() -> Weight; + fn close_bounty_proposed() -> Weight; + fn close_bounty_active() -> Weight; + fn extend_bounty_expiry() -> Weight; + fn spend_funds(b: u32, ) -> Weight; +} + +/// Weights for pallet_bounties using the Substrate node and recommended hardware. 
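//
// A runtime would typically point the pallet at these benchmarked weights when
// implementing the `Config` trait, roughly as follows (illustrative only; the
// `Runtime` type name is whatever the embedding runtime defines):
//
//     impl pallet_bounties::Config for Runtime {
//         // ...
//         type WeightInfo = pallet_bounties::weights::SubstrateWeight<Runtime>;
//     }
//
// The `()` implementation further below keeps tests and existing runtimes
// compiling without benchmarked values.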
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn propose_bounty(d: u32, ) -> Weight { + (64_778_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn approve_bounty() -> Weight { + (18_293_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn propose_curator() -> Weight { + (14_248_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn unassign_curator() -> Weight { + (52_100_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn accept_curator() -> Weight { + (52_564_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn award_bounty() -> Weight { + (37_426_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn claim_bounty() -> Weight { + (176_077_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + fn close_bounty_proposed() -> Weight { + (51_162_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn close_bounty_active() -> Weight { + (116_907_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn extend_bounty_expiry() -> Weight { + (36_419_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn spend_funds(b: u32, ) -> Weight { + (7_562_000 as Weight) + // Standard Error: 16_000 + .saturating_add((77_328_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn propose_bounty(d: u32, ) -> Weight { + (64_778_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn approve_bounty() -> Weight { + (18_293_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn propose_curator() -> Weight { + (14_248_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn unassign_curator() -> Weight { + (52_100_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn accept_curator() -> Weight { + (52_564_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn award_bounty() -> Weight { + 
(37_426_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn claim_bounty() -> Weight { + (176_077_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + fn close_bounty_proposed() -> Weight { + (51_162_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn close_bounty_active() -> Weight { + (116_907_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn extend_bounty_expiry() -> Weight { + (36_419_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn spend_funds(b: u32, ) -> Weight { + (7_562_000 as Weight) + // Standard Error: 16_000 + .saturating_add((77_328_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + } +} diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index fd302fb836579..0c58f41640100 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-collective" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] hex-literal = "0.3.1" -pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] diff --git 
a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index d4e80d515941f..bff7dad59d891 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -33,17 +33,16 @@ const SEED: u32 = 0; const MAX_BYTES: u32 = 1_024; -fn assert_last_event, I: Instance>(generic_event: >::Event) { +fn assert_last_event, I: Instance>(generic_event: >::Event) { let events = System::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } benchmarks_instance! { - _{ } - + set_members { let m in 1 .. T::MaxMembers::get(); let n in 1 .. T::MaxMembers::get(); diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index b7d561672b82f..50beb8607d61d 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -40,7 +40,7 @@ //! If there are not, or if no prime is set, then the motion is dropped without being executed. #![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit="128"] +#![recursion_limit = "128"] use sp_std::{prelude::*, result}; use sp_core::u32_trait::Value as U32; @@ -56,7 +56,7 @@ use frame_support::{ }, ensure, traits::{ChangeMembers, EnsureOrigin, Get, InitializeMembers}, - weights::{DispatchClass, GetDispatchInfo, Weight}, + weights::{DispatchClass, GetDispatchInfo, Weight, Pays}, }; use frame_system::{self as system, ensure_signed, ensure_root}; @@ -121,18 +121,18 @@ impl DefaultVote for MoreThanMajorityThenPrimeDefaultVote { } } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The outer origin type. type Origin: From>; /// The outer call dispatch type. type Proposal: Parameter - + Dispatchable>::Origin, PostInfo=PostDispatchInfo> + + Dispatchable>::Origin, PostInfo=PostDispatchInfo> + From> + GetDispatchInfo; /// The outer event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The time-out for council motions. type MotionDuration: Get; @@ -166,7 +166,7 @@ pub enum RawOrigin { } /// Origin for the collective module. -pub type Origin = RawOrigin<::AccountId, I>; +pub type Origin = RawOrigin<::AccountId, I>; #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] /// Info for keeping track of a motion being voted on. @@ -184,12 +184,12 @@ pub struct Votes { } decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Collective { + trait Store for Module, I: Instance=DefaultInstance> as Collective { /// The hashes of the active proposals. pub Proposals get(fn proposals): Vec; /// Actual proposal for a given hash, if it's current. pub ProposalOf get(fn proposal_of): - map hasher(identity) T::Hash => Option<>::Proposal>; + map hasher(identity) T::Hash => Option<>::Proposal>; /// Votes on a given proposal, if it is ongoing. 
pub Voting get(fn voting): map hasher(identity) T::Hash => Option>; @@ -209,8 +209,8 @@ decl_storage! { decl_event! { pub enum Event where - ::Hash, - ::AccountId, + ::Hash, + ::AccountId, { /// A motion (given hash) has been proposed (by given account) with a threshold (given /// `MemberCount`). @@ -239,7 +239,7 @@ decl_event! { } decl_error! { - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// Account is not a member NotMember, /// Duplicate proposals not allowed @@ -276,7 +276,7 @@ fn get_result_weight(result: DispatchResultWithPostInfo) -> Option { // Note that councillor operations are assigned to the operational class. decl_module! { - pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: ::Origin { + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: ::Origin { type Error = Error; fn deposit_event() = default; @@ -365,7 +365,7 @@ decl_module! { DispatchClass::Operational )] fn execute(origin, - proposal: Box<>::Proposal>, + proposal: Box<>::Proposal>, #[compact] length_bound: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -432,7 +432,7 @@ decl_module! { )] fn propose(origin, #[compact] threshold: MemberCount, - proposal: Box<>::Proposal>, + proposal: Box<>::Proposal>, #[compact] length_bound: u32 ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -488,6 +488,8 @@ decl_module! { /// /// Requires the sender to be a member. /// + /// Transaction fees will be waived if the member is voting on any particular proposal + /// for the first time and the call is successful. Subsequent vote changes will charge a fee. /// # /// ## Weight /// - `O(M)` where `M` is members-count (code- and governance-bounded) @@ -515,6 +517,9 @@ decl_module! { let position_yes = voting.ayes.iter().position(|a| a == &who); let position_no = voting.nays.iter().position(|a| a == &who); + // Detects first vote of the member in the motion + let is_account_voting_first_time = position_yes.is_none() && position_no.is_none(); + if approve { if position_yes.is_none() { voting.ayes.push(who.clone()); @@ -541,7 +546,17 @@ decl_module! { Voting::::insert(&proposal, voting); - Ok(Some(T::WeightInfo::vote(members.len() as u32)).into()) + if is_account_voting_first_time { + Ok(( + Some(T::WeightInfo::vote(members.len() as u32)), + Pays::No, + ).into()) + } else { + Ok(( + Some(T::WeightInfo::vote(members.len() as u32)), + Pays::Yes, + ).into()) + } } /// Close a vote that is either approved, disapproved or whose voting period has ended. @@ -554,6 +569,9 @@ decl_module! { /// If called after the end of the voting period abstentions are counted as rejections /// unless there is a prime member set and the prime member cast an approval. /// + /// If the close operation completes successfully with disapproval, the transaction fee will + /// be waived. Otherwise execution of the approved operation will be charged to the caller. + /// /// + `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed proposal. /// + `length_bound`: The upper bound for the length of the proposal in storage. Checked via /// `storage::read` so it is `size_of::() == 4` larger than the pure length. @@ -606,20 +624,23 @@ decl_module! 
{ let (proposal, len) = Self::validate_and_get_proposal( &proposal_hash, length_bound, - proposal_weight_bound + proposal_weight_bound, )?; Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); let (proposal_weight, proposal_count) = Self::do_approve_proposal(seats, voting, proposal_hash, proposal); - return Ok(Some( - T::WeightInfo::close_early_approved(len as u32, seats, proposal_count) - .saturating_add(proposal_weight) + return Ok(( + Some(T::WeightInfo::close_early_approved(len as u32, seats, proposal_count) + .saturating_add(proposal_weight)), + Pays::Yes, ).into()); + } else if disapproved { Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); let proposal_count = Self::do_disapprove_proposal(proposal_hash); - return Ok(Some( - T::WeightInfo::close_early_disapproved(seats, proposal_count) + return Ok(( + Some(T::WeightInfo::close_early_disapproved(seats, proposal_count)), + Pays::No, ).into()); } @@ -642,20 +663,22 @@ decl_module! { let (proposal, len) = Self::validate_and_get_proposal( &proposal_hash, length_bound, - proposal_weight_bound + proposal_weight_bound, )?; Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); let (proposal_weight, proposal_count) = Self::do_approve_proposal(seats, voting, proposal_hash, proposal); - return Ok(Some( - T::WeightInfo::close_approved(len as u32, seats, proposal_count) - .saturating_add(proposal_weight) + return Ok(( + Some(T::WeightInfo::close_approved(len as u32, seats, proposal_count) + .saturating_add(proposal_weight)), + Pays::Yes, ).into()); } else { Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); let proposal_count = Self::do_disapprove_proposal(proposal_hash); - return Ok(Some( - T::WeightInfo::close_disapproved(seats, proposal_count) + return Ok(( + Some(T::WeightInfo::close_disapproved(seats, proposal_count)), + Pays::No, ).into()); } } @@ -682,7 +705,7 @@ decl_module! { } } -impl, I: Instance> Module { +impl, I: Instance> Module { /// Check whether `who` is a member of the collective. pub fn is_member(who: &T::AccountId) -> bool { // Note: The dispatchables *do not* use this to check membership so make sure @@ -698,7 +721,7 @@ impl, I: Instance> Module { hash: &T::Hash, length_bound: u32, weight_bound: Weight - ) -> Result<(>::Proposal, usize), DispatchError> { + ) -> Result<(>::Proposal, usize), DispatchError> { let key = ProposalOf::::hashed_key_for(hash); // read the length of the proposal storage entry directly let proposal_len = storage::read(&key, &mut [0; 0], 0) @@ -728,7 +751,7 @@ impl, I: Instance> Module { seats: MemberCount, voting: Votes, proposal_hash: T::Hash, - proposal: >::Proposal, + proposal: >::Proposal, ) -> (Weight, u32) { Self::deposit_event(RawEvent::Approved(proposal_hash)); @@ -764,7 +787,7 @@ impl, I: Instance> Module { } } -impl, I: Instance> ChangeMembers for Module { +impl, I: Instance> ChangeMembers for Module { /// Update the members of the collective. Votes are updated and the prime is reset. 
/// /// NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but @@ -817,9 +840,13 @@ impl, I: Instance> ChangeMembers for Module { fn set_prime(prime: Option) { Prime::::set(prime); } + + fn get_prime() -> Option { + Prime::::get() + } } -impl, I: Instance> InitializeMembers for Module { +impl, I: Instance> InitializeMembers for Module { fn initialize_members(members: &[T::AccountId]) { if !members.is_empty() { assert!(>::get().is_empty(), "Members are already initialized!"); @@ -933,27 +960,29 @@ impl< #[cfg(test)] mod tests { use super::*; - use frame_support::{Hashable, assert_ok, assert_noop, parameter_types, weights::Weight}; + use frame_support::{Hashable, assert_ok, assert_noop, parameter_types}; use frame_system::{self as system, EventRecord, Phase}; use hex_literal::hex; use sp_core::H256; use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, testing::Header, + traits::{BlakeTwo256, IdentityLookup}, testing::Header, BuildStorage, }; use crate as collective; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); pub const MotionDuration: u64 = 3; pub const MaxProposals: u32 = 100; pub const MaxMembers: u32 = 100; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -965,21 +994,15 @@ mod tests { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } - impl Trait for Test { + impl Config for Test { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -989,7 +1012,7 @@ mod tests { type DefaultVote = PrimeDefaultVote; type WeightInfo = (); } - impl Trait for Test { + impl Config for Test { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -999,7 +1022,7 @@ mod tests { type DefaultVote = MoreThanMajorityThenPrimeDefaultVote; type WeightInfo = (); } - impl Trait for Test { + impl Config for Test { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -1441,6 +1464,96 @@ mod tests { }); } + #[test] + fn motions_all_first_vote_free_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let hash: H256 = proposal.blake2_256().into(); + let end = 4; + assert_ok!( + Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len, + ) + ); + assert_eq!( + Collective::voting(&hash), + Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![], end }) + ); + + // For the motion, acc 2's first vote, expecting Ok with Pays::No. 
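For context on the `pays_fee` assertions in this test: the `vote` and `close` hunks above return a `(Option<Weight>, Pays)` tuple, which FRAME converts into a `PostDispatchInfo`. A minimal sketch of that pattern, assuming only the `frame_support` weight types the pallet already imports (illustrative, not an excerpt from the patch):

use frame_support::dispatch::DispatchResultWithPostInfo;
use frame_support::weights::{Pays, Weight};

// Mirrors the pattern used in `vote` above: the tuple converts into a
// `PostDispatchInfo`, and `pays_fee` decides whether the caller is charged.
fn vote_outcome(is_first_vote: bool, weight: Weight) -> DispatchResultWithPostInfo {
    let pays = if is_first_vote { Pays::No } else { Pays::Yes };
    Ok((Some(weight), pays).into())
}

With `Pays::No` the call still consumes weight but no transaction fee is charged, which is why only the first vote on a motion is free and any later change of vote falls back to `Pays::Yes`.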
+ let vote_rval: DispatchResultWithPostInfo = Collective::vote( + Origin::signed(2), + hash.clone(), + 0, + true, + ); + assert_eq!(vote_rval.unwrap().pays_fee, Pays::No); + + // Duplicate vote, expecting error with Pays::Yes. + let vote_rval: DispatchResultWithPostInfo = Collective::vote( + Origin::signed(2), + hash.clone(), + 0, + true, + ); + assert_eq!(vote_rval.unwrap_err().post_info.pays_fee, Pays::Yes); + + // Modifying vote, expecting ok with Pays::Yes. + let vote_rval: DispatchResultWithPostInfo = Collective::vote( + Origin::signed(2), + hash.clone(), + 0, + false, + ); + assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); + + // For the motion, acc 3's first vote, expecting Ok with Pays::No. + let vote_rval: DispatchResultWithPostInfo = Collective::vote( + Origin::signed(3), + hash.clone(), + 0, + true, + ); + assert_eq!(vote_rval.unwrap().pays_fee, Pays::No); + + // acc 3 modify the vote, expecting Ok with Pays::Yes. + let vote_rval: DispatchResultWithPostInfo = Collective::vote( + Origin::signed(3), + hash.clone(), + 0, + false, + ); + assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); + + // Test close() Extrincis | Check DispatchResultWithPostInfo with Pay Info + + let proposal_weight = proposal.get_dispatch_info().weight; + let close_rval: DispatchResultWithPostInfo = Collective::close( + Origin::signed(2), + hash.clone(), + 0, + proposal_weight, + proposal_len, + ); + assert_eq!(close_rval.unwrap().pays_fee, Pays::No); + + // trying to close the proposal, which is already closed. + // Expecting error "ProposalMissing" with Pays::Yes + let close_rval: DispatchResultWithPostInfo = Collective::close( + Origin::signed(2), + hash.clone(), + 0, + proposal_weight, + proposal_len, + ); + assert_eq!(close_rval.unwrap_err().post_info.pays_fee, Pays::Yes); + }); + } + #[test] fn motions_reproposing_disapproved_works() { new_test_ext().execute_with(|| { diff --git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs index 4e4ec5196d0a7..f8558c833f016 100644 --- a/frame/collective/src/weights.rs +++ b/frame/collective/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -58,7 +58,7 @@ pub trait WeightInfo { /// Weights for pallet_collective using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn set_members(m: u32, n: u32, p: u32, ) -> Weight { (0 as Weight) .saturating_add((20_933_000 as Weight).saturating_mul(m as Weight)) diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index ffcb37385849a..c5ba615504c6b 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts" -version = "2.0.0" +version = "2.0.1" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -9,31 +9,39 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for WASM contracts" readme = "README.md" +# Prevent publish until we are ready to release 3.0.0 +publish = false + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "common" } +pallet-contracts-proc-macro = { version = "0.1.0", path = "proc-macro" } parity-wasm = { version = "0.41.0", default-features = false } -pwasm-utils = { version = "0.14.0", default-features = false } +pwasm-utils = { version = "0.16", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-sandbox = { version = "0.8.0", default-features = false, path = "../../primitives/sandbox" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-sandbox = { version = "0.9.0", default-features = false, path = "../../primitives/sandbox" } wasmi-validation = { version = "0.3.0", default-features = false } +# Only used in benchmarking to generate random contract code +rand = { version = "0.7.0", optional = true, default-features = false } +rand_pcg = { version = "0.2.1", optional = true } + [dev-dependencies] assert_matches = "1.3.0" hex-literal = "0.3.1" -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-timestamp = { version = "2.0.0", path = "../timestamp" } -pallet-randomness-collective-flip = { version = "2.0.0", path = 
"../randomness-collective-flip" } +pallet-balances = { version = "3.0.0", path = "../balances" } +pallet-timestamp = { version = "3.0.0", path = "../timestamp" } +pallet-randomness-collective-flip = { version = "3.0.0", path = "../randomness-collective-flip" } paste = "1.0" pretty_assertions = "0.6.1" wat = "1.0" @@ -54,7 +62,10 @@ std = [ "pwasm-utils/std", "wasmi-validation/std", "pallet-contracts-primitives/std", + "pallet-contracts-proc-macro/full", ] runtime-benchmarks = [ "frame-benchmarking", + "rand", + "rand_pcg", ] diff --git a/frame/contracts/README.md b/frame/contracts/README.md index dddcc3c8b8b85..8397d2f6bf005 100644 --- a/frame/contracts/README.md +++ b/frame/contracts/README.md @@ -42,23 +42,19 @@ fails, A can decide how to handle that failure, either proceeding or reverting A ### Dispatchable functions -* `put_code` - Stores the given binary Wasm code into the chain's storage and returns its `code_hash`. -* `instantiate` - Deploys a new contract from the given `code_hash`, optionally transferring some balance. -This instantiates a new smart contract account and calls its contract deploy handler to -initialize the contract. -* `call` - Makes a call to an account, optionally transferring some balance. +Those are documented in the reference documentation of the `Module`. ## Usage The Contract module is a work in progress. The following examples show how this Contract module can be used to instantiate and call contracts. -* [`ink`](https://github.com/paritytech/ink) is +- [`ink`](https://github.com/paritytech/ink) is an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables writing WebAssembly based smart contracts in the Rust programming language. This is a work in progress. ## Related Modules -* [Balances](https://docs.rs/pallet-balances/latest/pallet_balances/) +- [Balances](https://docs.rs/pallet-balances/latest/pallet_balances/) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index e87cad055aff5..f385a7ae9f2ff 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-primitives" -version = "2.0.0" +version = "2.0.1" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "A crate that hosts a common definitions that are relevant for the pallet-contracts." readme = "README.md" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,9 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # This crate should not rely on any of the frame primitives. 
bitflags = "1.0" -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] diff --git a/frame/contracts/common/src/lib.rs b/frame/contracts/common/src/lib.rs index 9da105cf2d80a..2b325d63d628d 100644 --- a/frame/contracts/common/src/lib.rs +++ b/frame/contracts/common/src/lib.rs @@ -1,18 +1,19 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! A crate that hosts a common definitions that are relevant for the pallet-contracts. diff --git a/frame/contracts/fixtures/call_return_code.wat b/frame/contracts/fixtures/call_return_code.wat index f7a7ff20a49e3..4e9ab4dd77ce1 100644 --- a/frame/contracts/fixtures/call_return_code.wat +++ b/frame/contracts/fixtures/call_return_code.wat @@ -1,5 +1,5 @@ -;; This calls Django (4) and transfers 100 balance during this call and copies the return code -;; of this call to the output buffer. +;; This calls the supplied dest and transfers 100 balance during this call and copies +;; the return code of this call to the output buffer. ;; It also forwards its input to the callee. 
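The rewritten fixture below no longer hard codes an 8 byte "django" address; the caller supplies a 32 byte destination followed by 4 bytes that are forwarded to the callee, and the fixture sends a fixed 100 balance. A hypothetical test-side helper (not part of the patch) for assembling that input buffer might look like:

// Layout expected by call_return_code.wat after this change:
// [0..32)  destination account id
// [32..36) 4 bytes forwarded to the callee
fn call_return_code_input(dest: &[u8; 32], forward: &[u8; 4]) -> Vec<u8> {
    let mut input = Vec::with_capacity(36);
    input.extend_from_slice(dest);
    input.extend_from_slice(forward);
    input
}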
(module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) @@ -7,38 +7,36 @@ (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) address of django - (data (i32.const 0) "\04\00\00\00\00\00\00\00") + ;; [0, 8) 100 balance + (data (i32.const 0) "\64\00\00\00\00\00\00\00") - ;; [8, 16) 100 balance - (data (i32.const 8) "\64\00\00\00\00\00\00\00") + ;; [8, 12) here we store the return code of the transfer - ;; [16, 20) here we store the return code of the transfer + ;; [12, 16) size of the input data + (data (i32.const 12) "\24") - ;; [20, 24) here we store the input data - - ;; [24, 28) size of the input data - (data (i32.const 24) "\04") + ;; [16, inf) here we store the input data + ;; 32 byte dest + 4 byte forward (func (export "deploy")) (func (export "call") - (call $seal_input (i32.const 20) (i32.const 24)) + (call $seal_input (i32.const 16) (i32.const 12)) (i32.store - (i32.const 16) + (i32.const 8) (call $seal_call - (i32.const 0) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 16) ;; Pointer to "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 20) ;; Pointer to input data buffer address - (i32.load (i32.const 24)) ;; Length of input data buffer + (i32.const 48) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Ptr to output buffer len ) ) ;; exit with success and take transfer return code to the output buffer - (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 8) (i32.const 4)) ) ) diff --git a/frame/contracts/fixtures/caller_contract.wat b/frame/contracts/fixtures/caller_contract.wat index 408af92e18296..d6564117b721f 100644 --- a/frame/contracts/fixtures/caller_contract.wat +++ b/frame/contracts/fixtures/caller_contract.wat @@ -2,7 +2,9 @@ (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_balance" (func $seal_balance (param i32 i32))) (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate + (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "seal0" "seal_println" (func $seal_println (param i32 i32))) (import "env" "memory" (memory 1 1)) @@ -71,6 +73,8 @@ (i32.const 0) ;; Length is ignored in this case (i32.const 4294967295) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_le ) ) @@ -98,6 +102,9 @@ (i32.const 0) ;; Length is ignored in this case (i32.const 4294967295) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_le + ) ) @@ -114,7 +121,7 @@ ;; Length of the output buffer (i32.store (i32.sub (get_local $sp) (i32.const 4)) - (i32.const 8) + (i32.const 256) ) ;; Deploy the contract successfully. 
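The hunks above also switch `seal_instantiate` to its extended signature with two trailing salt parameters. Spelled out as a Rust import declaration, the shape implied by the wat signatures in this diff is roughly the following (an inference from the fixtures, not an excerpt from the pallet):

// Parameter order as used by the fixtures: code hash, gas limit, value to
// transfer, constructor input, output buffer for the new address, output
// buffer for the constructor return data, and finally the new salt.
#[link(wasm_import_module = "seal0")]
extern "C" {
    fn seal_instantiate(
        code_hash_ptr: u32, code_hash_len: u32,
        gas: u64,
        value_ptr: u32, value_len: u32,
        input_data_ptr: u32, input_data_len: u32,
        address_ptr: u32, address_len_ptr: u32,
        output_ptr: u32, output_len_ptr: u32,
        salt_ptr: u32, salt_len: u32,
    ) -> u32;
}

The returned `u32` is the call's return code, which the fixtures copy to their output buffer.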
@@ -131,6 +138,8 @@ (i32.sub (get_local $sp) (i32.const 4)) ;; Pointer to the address buffer length (i32.const 4294967295) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_le ) ) @@ -142,7 +151,7 @@ ;; Check that address has the expected length (call $assert - (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 4))) (i32.const 8)) + (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 4))) (i32.const 32)) ) ;; Check that balance has been deducted. @@ -169,7 +178,7 @@ (set_local $exit_code (call $seal_call (i32.const 16) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. @@ -205,7 +214,7 @@ (set_local $exit_code (call $seal_call (i32.const 16) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 1) ;; Supply too little gas (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. @@ -242,7 +251,7 @@ (set_local $exit_code (call $seal_call (i32.const 16) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. diff --git a/frame/contracts/fixtures/chain_extension.wat b/frame/contracts/fixtures/chain_extension.wat new file mode 100644 index 0000000000000..db7e83fd96b42 --- /dev/null +++ b/frame/contracts/fixtures/chain_extension.wat @@ -0,0 +1,46 @@ +;; Call chain extension by passing through input and output of this contract +(module + (import "seal0" "seal_call_chain_extension" + (func $seal_call_chain_extension (param i32 i32 i32 i32 i32) (result i32)) + ) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 16 16)) + + (func $assert (param i32) + (block $ok + (br_if $ok (get_local 0)) + (unreachable) + ) + ) + + ;; [0, 4) len of input output + (data (i32.const 0) "\02") + + ;; [4, 12) buffer for input + + ;; [12, 16) len of output buffer + (data (i32.const 12) "\02") + + ;; [16, inf) buffer for output + + (func (export "deploy")) + + (func (export "call") + (call $seal_input (i32.const 4) (i32.const 0)) + + ;; the chain extension passes through the input and returns it as output + (call $seal_call_chain_extension + (i32.load8_u (i32.const 4)) ;; func_id + (i32.const 4) ;; input_ptr + (i32.load (i32.const 0)) ;; input_len + (i32.const 16) ;; output_ptr + (i32.const 12) ;; output_len_ptr + ) + + ;; the chain extension passes through the func_id + (call $assert (i32.eq (i32.load8_u (i32.const 4)))) + + (call $seal_return (i32.const 0) (i32.const 16) (i32.load (i32.const 12))) + ) +) diff --git a/frame/contracts/fixtures/destroy_and_transfer.wat b/frame/contracts/fixtures/destroy_and_transfer.wat index 3220f4e612d7d..7e1d84f3cf98a 100644 --- a/frame/contracts/fixtures/destroy_and_transfer.wat +++ b/frame/contracts/fixtures/destroy_and_transfer.wat @@ -4,7 +4,9 @@ (import "seal0" "seal_set_storage" (func 
$seal_set_storage (param i32 i32 i32))) (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "seal0" "seal_transfer" (func $seal_transfer (param i32 i32 i32 i32) (result i32))) - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate + (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "env" "memory" (memory 1 1)) ;; [0, 8) Endowment to send when creating contract. @@ -16,14 +18,18 @@ ;; [48, 80) Buffer where to store the input to the contract - ;; [80, 88) Buffer where to store the address of the instantiated contract - ;; [88, 96) Size of the buffer - (data (i32.const 88) "\08") + (data (i32.const 88) "\FF") ;; [96, 100) Size of the input buffer (data (i32.const 96) "\20") + ;; [100, 132) Buffer where to store the address of the instantiated contract + + ;; [132, 134) Salt + (data (i32.const 132) "\47\11") + + (func $assert (param i32) (block $ok (br_if $ok @@ -54,10 +60,12 @@ (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer - (i32.const 80) ;; Buffer where to store address of new contract + (i32.const 100) ;; Buffer where to store address of new contract (i32.const 88) ;; Pointer to the length of the buffer (i32.const 4294967295) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this cas + (i32.const 0) ;; Length is ignored in this case + (i32.const 132) ;; salt_ptr + (i32.const 2) ;; salt_len ) (i32.const 0) ) @@ -67,15 +75,15 @@ (call $assert (i32.eq (i32.load (i32.const 88)) - (i32.const 8) + (i32.const 32) ) ) ;; Store the return address. (call $seal_set_storage (i32.const 16) ;; Pointer to the key - (i32.const 80) ;; Pointer to the value - (i32.const 8) ;; Length of the value + (i32.const 100) ;; Pointer to the value + (i32.const 32) ;; Length of the value ) ) @@ -85,7 +93,7 @@ (i32.eq (call $seal_get_storage (i32.const 16) ;; Pointer to the key - (i32.const 80) ;; Pointer to the value + (i32.const 100) ;; Pointer to the value (i32.const 88) ;; Pointer to the len of the value ) (i32.const 0) @@ -94,7 +102,7 @@ (call $assert (i32.eq (i32.load (i32.const 88)) - (i32.const 8) + (i32.const 32) ) ) @@ -102,8 +110,8 @@ (call $assert (i32.eq (call $seal_call - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 100) ;; Pointer to destination address + (i32.const 32) ;; Length of destination address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer @@ -121,8 +129,8 @@ (call $assert (i32.eq (call $seal_call - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 100) ;; Pointer to destination address + (i32.const 32) ;; Length of destination address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
(i32.const 8) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer @@ -141,8 +149,8 @@ (call $assert (i32.eq (call $seal_transfer - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 100) ;; Pointer to destination address + (i32.const 32) ;; Length of destination address (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer ) diff --git a/frame/contracts/fixtures/drain.wat b/frame/contracts/fixtures/drain.wat index 9180047f5d015..546026ac95986 100644 --- a/frame/contracts/fixtures/drain.wat +++ b/frame/contracts/fixtures/drain.wat @@ -38,7 +38,7 @@ (i32.eq (call $seal_transfer (i32.const 16) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 32) ;; Length of destination address (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer ) diff --git a/frame/contracts/fixtures/instantiate_return_code.wat b/frame/contracts/fixtures/instantiate_return_code.wat index 20ab96d88ad2e..544489329cfad 100644 --- a/frame/contracts/fixtures/instantiate_return_code.wat +++ b/frame/contracts/fixtures/instantiate_return_code.wat @@ -1,47 +1,49 @@ -;; This instantiats Charlie (3) and transfers 100 balance during this call and copies the return code +;; This instantiats a contract and transfers 100 balance during this call and copies the return code ;; of this call to the output buffer. ;; The first 32 byte of input is the code hash to instantiate ;; The rest of the input is forwarded to the constructor of the callee (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate + (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) address of django - (data (i32.const 0) "\04\00\00\00\00\00\00\00") + ;; [0, 8) 10_000 balance + (data (i32.const 0) "\10\27\00\00\00\00\00\00") - ;; [8, 16) 100 balance - (data (i32.const 8) "\64\00\00\00\00\00\00\00") + ;; [8, 12) here we store the return code of the transfer - ;; [16, 20) here we store the return code of the transfer + ;; [12, 16) size of the input buffer + (data (i32.const 12) "\24") - ;; [20, 24) size of the input buffer - (data (i32.const 20) "\FF") - - ;; [24, inf) input buffer + ;; [16, inf) input buffer + ;; 32 bye code hash + 4 byte forward (func (export "deploy")) (func (export "call") - (call $seal_input (i32.const 24) (i32.const 20)) + (call $seal_input (i32.const 16) (i32.const 12)) (i32.store - (i32.const 16) + (i32.const 8) (call $seal_instantiate - (i32.const 24) ;; Pointer to the code hash. + (i32.const 16) ;; Pointer to the code hash. (i32.const 32) ;; Length of the code hash. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. 
- (i32.const 56) ;; Pointer to input data buffer address - (i32.sub (i32.load (i32.const 20)) (i32.const 32)) ;; Length of input data buffer + (i32.const 48) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy address (i32.const 0) ;; Length is ignored in this case (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_len ) ) ;; exit with success and take transfer return code to the output buffer - (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 8) (i32.const 4)) ) ) diff --git a/frame/contracts/fixtures/restoration.wat b/frame/contracts/fixtures/restoration.wat index 3c15f7ae0881e..3462af2870816 100644 --- a/frame/contracts/fixtures/restoration.wat +++ b/frame/contracts/fixtures/restoration.wat @@ -19,20 +19,19 @@ (func (export "call") ;; copy code hash to contract memory - (call $seal_input (i32.const 264) (i32.const 304)) + (call $seal_input (i32.const 308) (i32.const 304)) (call $assert (i32.eq (i32.load (i32.const 304)) - (i32.const 32) + (i32.const 64) ) ) - (call $seal_restore_to ;; Pointer and length of the encoded dest buffer. - (i32.const 256) - (i32.const 8) + (i32.const 340) + (i32.const 32) ;; Pointer and length of the encoded code hash buffer - (i32.const 264) + (i32.const 308) (i32.const 32) ;; Pointer and length of the encoded rent_allowance buffer (i32.const 296) @@ -65,14 +64,12 @@ ;; Buffer that has ACL storage keys. (data (i32.const 100) "\01") - ;; Address of bob - (data (i32.const 256) "\02\00\00\00\00\00\00\00") - - ;; [264, 296) Code hash of SET_RENT (copied here by seal_input) - ;; [296, 304) Rent allowance (data (i32.const 296) "\32\00\00\00\00\00\00\00") - ;; [304, 308) Size of SET_RENT buffer - (data (i32.const 304) "\20") + ;; [304, 308) Size of the buffer that holds code_hash + addr + (data (i32.const 304) "\40") + + ;; [308, 340) code hash of bob (copied by seal_input) + ;; [340, 372) addr of bob (copied by seal_input) ) diff --git a/frame/contracts/fixtures/self_destruct.wat b/frame/contracts/fixtures/self_destruct.wat index 6898e746b0836..b8a37306e2011 100644 --- a/frame/contracts/fixtures/self_destruct.wat +++ b/frame/contracts/fixtures/self_destruct.wat @@ -5,20 +5,23 @@ (import "seal0" "seal_terminate" (func $seal_terminate (param i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) reserved for $seal_address output + ;; [0, 32) reserved for $seal_address output - ;; [8, 16) length of the buffer - (data (i32.const 8) "\08") + ;; [32, 36) length of the buffer + (data (i32.const 32) "\20") - ;; [16, 24) Address of django - (data (i32.const 16) "\04\00\00\00\00\00\00\00") + ;; [36, 68) Address of django + (data (i32.const 36) + "\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04" + "\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04" + ) - ;; [24, 32) reserved for output of $seal_input + ;; [68, 72) reserved for output of $seal_input - ;; [32, 36) length of the buffer - (data (i32.const 32) "\04") + ;; [72, 76) length of the buffer + (data (i32.const 72) "\04") - ;; [36, inf) zero initialized + ;; [76, inf) zero initialized (func $assert (param i32) (block $ok @@ -36,16 +39,16 @@ ;; This should trap instead of self-destructing since a contract cannot be removed live in ;; the execution stack cannot be removed. If the recursive call traps, then trap here as ;; well. 
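A recurring change in these fixtures is the address length growing from 8 to 32 bytes, matching the full 32 byte account ids the contracts tests now use. The shapes of the simpler transfer related imports stay the same and only the length arguments change; inferred from the wat signatures above (again an assumption, not pallet source):

// `seal_transfer` returns a return code; `seal_terminate` removes the calling
// contract, sends its balance to the 32 byte beneficiary, and never returns.
#[link(wasm_import_module = "seal0")]
extern "C" {
    fn seal_transfer(account_ptr: u32, account_len: u32, value_ptr: u32, value_len: u32) -> u32;
    fn seal_terminate(beneficiary_ptr: u32, beneficiary_len: u32);
}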
- (call $seal_input (i32.const 24) (i32.const 32)) - (if (i32.load (i32.const 32)) + (call $seal_input (i32.const 68) (i32.const 72)) + (if (i32.load (i32.const 72)) (then - (call $seal_address (i32.const 0) (i32.const 8)) + (call $seal_address (i32.const 0) (i32.const 32)) ;; Expect address to be 8 bytes. (call $assert (i32.eq - (i32.load (i32.const 8)) - (i32.const 8) + (i32.load (i32.const 32)) + (i32.const 32) ) ) @@ -54,9 +57,9 @@ (i32.eq (call $seal_call (i32.const 0) ;; Pointer to own address - (i32.const 8) ;; Length of own address + (i32.const 32) ;; Length of own address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 36) ;; Pointer to the buffer with value to transfer + (i32.const 76) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer (i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer @@ -70,8 +73,8 @@ (else ;; Try to terminate and give balance to django. (call $seal_terminate - (i32.const 16) ;; Pointer to beneficiary address - (i32.const 8) ;; Length of beneficiary address + (i32.const 36) ;; Pointer to beneficiary address + (i32.const 32) ;; Length of beneficiary address ) (unreachable) ;; seal_terminate never returns ) diff --git a/frame/contracts/fixtures/self_destructing_constructor.wat b/frame/contracts/fixtures/self_destructing_constructor.wat index ab8c289f1b564..85fce511e21b9 100644 --- a/frame/contracts/fixtures/self_destructing_constructor.wat +++ b/frame/contracts/fixtures/self_destructing_constructor.wat @@ -15,7 +15,7 @@ ;; Self-destruct by sending full balance to the 0 address. (call $seal_terminate (i32.const 0) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 32) ;; Length of destination address ) ) diff --git a/frame/contracts/fixtures/set_rent.wat b/frame/contracts/fixtures/set_rent.wat index a09d3dc4bd47a..4abb7ffe9dbbb 100644 --- a/frame/contracts/fixtures/set_rent.wat +++ b/frame/contracts/fixtures/set_rent.wat @@ -26,7 +26,7 @@ (func $call_2 (call $assert (i32.eq - (call $seal_transfer (i32.const 68) (i32.const 8) (i32.const 76) (i32.const 8)) + (call $seal_transfer (i32.const 136) (i32.const 32) (i32.const 100) (i32.const 8)) (i32.const 0) ) ) @@ -47,10 +47,11 @@ ;; Dispatch the call according to input size (func (export "call") (local $input_size i32) - (i32.store (i32.const 64) (i32.const 64)) - (call $seal_input (i32.const 1024) (i32.const 64)) + ;; 4 byte i32 for br_table followed by 32 byte destination for transfer + (i32.store (i32.const 128) (i32.const 36)) + (call $seal_input (i32.const 132) (i32.const 128)) (set_local $input_size - (i32.load (i32.const 64)) + (i32.load (i32.const 132)) ) (block $IF_ELSE (block $IF_2 @@ -81,25 +82,24 @@ (i32.const 0) (i32.const 4) ) + (i32.store (i32.const 128) (i32.const 64)) (call $seal_input - (i32.const 0) - (i32.const 64) + (i32.const 132) + (i32.const 128) ) (call $seal_set_rent_allowance - (i32.const 0) - (i32.load (i32.const 64)) + (i32.const 132) + (i32.load (i32.const 128)) ) ) ;; Encoding of 10 in balance (data (i32.const 0) "\28") - ;; Size of the buffer at address 0 - (data (i32.const 64) "\40") + ;; encoding of 50 balance + (data (i32.const 100) "\32") - ;; encoding of Charlies's account id - (data (i32.const 68) "\03") + ;; [128, 132) size of seal input buffer - ;; encoding of 50 balance - (data (i32.const 76) "\32") + ;; [132, inf) output buffer for seal input ) diff --git 
a/frame/contracts/fixtures/transfer_return_code.wat b/frame/contracts/fixtures/transfer_return_code.wat index 7a1bec9adf38c..50098851dcf81 100644 --- a/frame/contracts/fixtures/transfer_return_code.wat +++ b/frame/contracts/fixtures/transfer_return_code.wat @@ -5,27 +5,30 @@ (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) zero-adress - (data (i32.const 0) "\00\00\00\00\00\00\00\00") + ;; [0, 32) zero-adress + (data (i32.const 0) + "\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00" + "\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00" + ) - ;; [8, 16) 100 balance - (data (i32.const 8) "\64\00\00\00\00\00\00\00") + ;; [32, 40) 100 balance + (data (i32.const 32) "\64\00\00\00\00\00\00\00") - ;; [16, 20) here we store the return code of the transfer + ;; [40, 44) here we store the return code of the transfer (func (export "deploy")) (func (export "call") (i32.store - (i32.const 16) + (i32.const 40) (call $seal_transfer (i32.const 0) ;; ptr to destination address - (i32.const 8) ;; length of destination address - (i32.const 8) ;; ptr to value to transfer + (i32.const 32) ;; length of destination address + (i32.const 32) ;; ptr to value to transfer (i32.const 8) ;; length of value to transfer ) ) ;; exit with success and take transfer return code to the output buffer - (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 40) (i32.const 4)) ) ) diff --git a/frame/contracts/proc-macro/Cargo.toml b/frame/contracts/proc-macro/Cargo.toml new file mode 100644 index 0000000000000..56ef855335571 --- /dev/null +++ b/frame/contracts/proc-macro/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "pallet-contracts-proc-macro" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Procedural macros used in pallet_contracts" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = "1" +quote = "1" +syn = "1" + +[dev-dependencies] + +[features] +# If set the full output is generated. Do NOT set when generating for wasm runtime. +full = [] diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs new file mode 100644 index 0000000000000..6fc2fbe82e037 --- /dev/null +++ b/frame/contracts/proc-macro/src/lib.rs @@ -0,0 +1,142 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Proc macros used in the contracts module. 
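The derives defined below are meant for structs whose fields are numeric weights. As a quick orientation, a hedged usage sketch (the struct and field names are invented for illustration; the generated impl additionally needs `sp_runtime` and an allocator in the consuming crate):

use pallet_contracts_proc_macro::WeightDebug;

// Illustrative only: with the crate's `full` feature each numeric field is
// treated as a weight and printed by the generated `Debug` impl as ms/µs/ns/ps;
// without `full` (the wasm runtime build) the fields are simply omitted.
#[derive(WeightDebug)]
struct ExampleWeights {
    caller: u64,
    deposit_event: u64,
}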
+ +#![no_std] + +extern crate alloc; + +use proc_macro2::TokenStream; +use quote::{quote, quote_spanned}; +use syn::spanned::Spanned; +use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Fields, Ident}; +use alloc::string::ToString; + +/// This derives `Debug` for a struct where each field must be of some numeric type. +/// It interprets each field as its represents some weight and formats it as times so that +/// it is readable by humans. +#[proc_macro_derive(WeightDebug)] +pub fn derive_weight_debug(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + derive_debug(input, format_weight) +} + +/// This is basically identical to the std libs Debug derive but without adding any +/// bounds to existing generics. +#[proc_macro_derive(ScheduleDebug)] +pub fn derive_schedule_debug(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + derive_debug(input, format_default) +} + +fn derive_debug( + input: proc_macro::TokenStream, + fmt: impl Fn(&Ident) -> TokenStream +) -> proc_macro::TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let name = &input.ident; + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + let data = if let Data::Struct(data) = &input.data { + data + } else { + return quote_spanned! { + name.span() => + compile_error!("WeightDebug is only supported for structs."); + }.into(); + }; + + #[cfg(feature = "full")] + let fields = iterate_fields(data, fmt); + + #[cfg(not(feature = "full"))] + let fields = { + drop(fmt); + drop(data); + TokenStream::new() + }; + + let tokens = quote! { + impl #impl_generics core::fmt::Debug for #name #ty_generics #where_clause { + fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use ::sp_runtime::{FixedPointNumber, FixedU128 as Fixed}; + let mut formatter = formatter.debug_struct(stringify!(#name)); + #fields + formatter.finish() + } + } + }; + + tokens.into() +} + +/// This is only used then the `full` feature is activated. +#[cfg(feature = "full")] +fn iterate_fields(data: &DataStruct, fmt: impl Fn(&Ident) -> TokenStream) -> TokenStream { + match &data.fields { + Fields::Named(fields) => { + let recurse = fields.named + .iter() + .filter_map(|f| { + let name = f.ident.as_ref()?; + if name.to_string().starts_with('_') { + return None; + } + let value = fmt(name); + let ret = quote_spanned!{ f.span() => + formatter.field(stringify!(#name), #value); + }; + Some(ret) + }); + quote!{ + #( #recurse )* + } + } + Fields::Unnamed(fields) => quote_spanned!{ + fields.span() => + compile_error!("Unnamed fields are not supported") + }, + Fields::Unit => quote!(), + } +} + +fn format_weight(field: &Ident) -> TokenStream { + quote_spanned! { field.span() => + &if self.#field > 1_000_000_000 { + format!( + "{:.1?} ms", + Fixed::saturating_from_rational(self.#field, 1_000_000_000).to_fraction() + ) + } else if self.#field > 1_000_000 { + format!( + "{:.1?} µs", + Fixed::saturating_from_rational(self.#field, 1_000_000).to_fraction() + ) + } else if self.#field > 1_000 { + format!( + "{:.1?} ns", + Fixed::saturating_from_rational(self.#field, 1_000).to_fraction() + ) + } else { + format!("{} ps", self.#field) + } + } +} + +fn format_default(field: &Ident) -> TokenStream { + quote_spanned! 
{ field.span() => + &self.#field + } +} diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index 587abcbcddaec..06c3c7d243e0f 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc" -version = "0.8.0" +version = "0.8.1" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,21 +8,22 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Node-specific RPC methods for interaction with contracts." readme = "README.md" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0", path = "../../../primitives/rpc" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +jsonrpc-core = "15.1.0" +jsonrpc-core-client = "15.1.0" +jsonrpc-derive = "15.1.0" +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-rpc = { version = "3.0.0", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } pallet-contracts-primitives = { version = "2.0.0", path = "../common" } pallet-contracts-rpc-runtime-api = { version = "0.8.0", path = "./runtime-api" } diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index 04becf2b45f49..0794fee292842 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0" +version = "0.8.1" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,15 +8,16 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Runtime API definition required by Contracts RPC extensions." 
readme = "README.md" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../../primitives/runtime" } +sp-api = { version = "3.0.0", default-features = false, path = "../../../../primitives/api" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../../primitives/runtime" } pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "../../common" } [features] diff --git a/frame/contracts/rpc/runtime-api/src/lib.rs b/frame/contracts/rpc/runtime-api/src/lib.rs index 94b9fe7967c08..6f0399586fa22 100644 --- a/frame/contracts/rpc/runtime-api/src/lib.rs +++ b/frame/contracts/rpc/runtime-api/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs index 6d43ea75c035f..e0a056906f743 100644 --- a/frame/contracts/rpc/src/lib.rs +++ b/frame/contracts/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -33,7 +33,7 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT}, DispatchError, }; -use std::convert::TryInto; +use std::convert::{TryFrom, TryInto}; use pallet_contracts_primitives::ContractExecResult; pub use pallet_contracts_rpc_runtime_api::ContractsApi as ContractsRuntimeApi; @@ -76,10 +76,10 @@ impl From for Error { #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] -pub struct CallRequest { +pub struct CallRequest { origin: AccountId, dest: AccountId, - value: Balance, + value: number::NumberOrHex, gas_limit: number::NumberOrHex, input_data: Bytes, } @@ -141,7 +141,7 @@ pub trait ContractsApi { #[rpc(name = "contracts_call")] fn call( &self, - call_request: CallRequest, + call_request: CallRequest, at: Option, ) -> Result; @@ -201,11 +201,11 @@ where <::Header as HeaderT>::Number, >, AccountId: Codec, - Balance: Codec, + Balance: Codec + TryFrom, { fn call( &self, - call_request: CallRequest, + call_request: CallRequest, at: Option<::Hash>, ) -> Result { let api = self.client.runtime_api(); @@ -221,6 +221,13 @@ where input_data, } = call_request; + // Make sure that value fits into the balance type. + let value: Balance = value.try_into().map_err(|_| Error { + code: ErrorCode::InvalidParams, + message: format!("{:?} doesn't fit into the balance type", value), + data: None, + })?; + // Make sure that gas_limit fits into 64 bits. 
let gas_limit: u64 = gas_limit.try_into().map_err(|_| Error { code: ErrorCode::InvalidParams, @@ -305,17 +312,18 @@ mod tests { #[test] fn call_request_should_serialize_deserialize_properly() { - type Req = CallRequest; + type Req = CallRequest; let req: Req = serde_json::from_str(r#" { "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", "dest": "5DRakbLVnjVrW6niwLfHGW24EeCEvDAFGEXrtaYS5M4ynoom", - "value": 0, + "value": "0x112210f4B16c1cb1", "gasLimit": 1000000000000, "inputData": "0x8c97db39" } "#).unwrap(); assert_eq!(req.gas_limit.into_u256(), U256::from(0xe8d4a51000u64)); + assert_eq!(req.value.into_u256(), U256::from(1234567890987654321u128)); } #[test] diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index dc3730e95ca1f..01ca7d3aac22d 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,32 +24,55 @@ //! we define this simple definition of a contract that can be passed to `create_code` that //! compiles it down into a `WasmModule` that can be used as a contract's code. -use crate::Trait; +use crate::Config; use crate::Module as Contracts; use parity_wasm::elements::{Instruction, Instructions, FuncBody, ValueType, BlockType}; +use pwasm_utils::stack_height::inject_limiter; +use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::Hash; +use sp_sandbox::{EnvironmentDefinitionBuilder, Memory}; use sp_std::{prelude::*, convert::TryFrom}; /// Pass to `create_code` in order to create a compiled `WasmModule`. +/// +/// This exists to have a more declarative way to describe a wasm module than to use +/// parity-wasm directly. It is tailored to fit the structure of contracts that are +/// needed for benchmarking. +#[derive(Default)] pub struct ModuleDefinition { - pub data_segments: Vec, + /// Imported memory attached to the module. No memory is imported if `None`. pub memory: Option, + /// Initializers for the imported memory. + pub data_segments: Vec, + /// Creates the supplied amount of i64 mutable globals initialized with random values. + pub num_globals: u32, + /// List of functions that the module should import. They start with index 0. pub imported_functions: Vec, + /// Function body of the exported `deploy` function. Body is empty if `None`. + /// Its index is `imported_functions.len()`. pub deploy_body: Option, + /// Function body of the exported `call` function. Body is empty if `None`. + /// Its index is `imported_functions.len() + 1`. pub call_body: Option, + /// Function body of a non-exported function with index `imported_functions.len() + 2`. + pub aux_body: Option, + /// The amount of I64 arguments the aux function should have. + pub aux_arg_num: u32, + /// If set to true the stack height limiter is injected into the the module. This is + /// needed for instruction debugging because the cost of executing the stack height + /// instrumentation should be included in the costs for the individual instructions + /// that cause more metering code (only call). + pub inject_stack_metering: bool, + /// Create a table containing function pointers. 
+ pub table: Option, } -impl Default for ModuleDefinition { - fn default() -> Self { - Self { - data_segments: vec![], - memory: None, - imported_functions: vec![], - deploy_body: None, - call_body: None, - } - } +pub struct TableSegment { + /// How many elements should be created inside the table. + pub num_elements: u32, + /// The function index with which all table elements should be initialized. + pub function_index: u32, } pub struct DataSegment { @@ -57,13 +80,18 @@ pub struct DataSegment { pub value: Vec, } +#[derive(Clone)] pub struct ImportedMemory { pub min_pages: u32, pub max_pages: u32, } impl ImportedMemory { - pub fn max() -> Self { + pub fn max() -> Self + where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, + { let pages = max_pages::(); Self { min_pages: pages, max_pages: pages } } @@ -75,14 +103,19 @@ pub struct ImportedFunction { pub return_type: Option, } -/// A wasm module ready to be put on chain with `put_code`. +/// A wasm module ready to be put on chain. #[derive(Clone)] -pub struct WasmModule { +pub struct WasmModule { pub code: Vec, pub hash: ::Output, + memory: Option, } -impl From for WasmModule { +impl From for WasmModule +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ fn from(def: ModuleDefinition) -> Self { // internal functions start at that offset. let func_offset = u32::try_from(def.imported_functions.len()).unwrap(); @@ -91,14 +124,14 @@ impl From for WasmModule { let mut contract = parity_wasm::builder::module() // deploy function (first internal function) .function() - .signature().with_params(vec![]).with_return_type(None).build() + .signature().with_return_type(None).build() .with_body(def.deploy_body.unwrap_or_else(|| FuncBody::new(Vec::new(), Instructions::empty()) )) .build() // call function (second internal function) .function() - .signature().with_params(vec![]).with_return_type(None).build() + .signature().with_return_type(None).build() .with_body(def.call_body.unwrap_or_else(|| FuncBody::new(Vec::new(), Instructions::empty()) )) @@ -106,8 +139,19 @@ impl From for WasmModule { .export().field("deploy").internal().func(func_offset).build() .export().field("call").internal().func(func_offset + 1).build(); + // If specified we add an additional internal function + if let Some(body) = def.aux_body { + let mut signature = contract + .function() + .signature().with_return_type(None); + for _ in 0 .. def.aux_arg_num { + signature = signature.with_param(ValueType::I64); + } + contract = signature.build().with_body(body).build(); + } + // Grant access to linear memory. 
- if let Some(memory) = def.memory { + if let Some(memory) = &def.memory { contract = contract.import() .module("env").field("memory") .external().memory(memory.min_pages, Some(memory.max_pages)) @@ -136,28 +180,81 @@ impl From for WasmModule { .build() } - let code = contract.build().to_bytes().unwrap(); + // Add global variables + if def.num_globals > 0 { + use rand::{prelude::*, distributions::Standard}; + let rng = rand_pcg::Pcg32::seed_from_u64(3112244599778833558); + for val in rng.sample_iter(Standard).take(def.num_globals as usize) { + contract = contract + .global() + .value_type().i64() + .mutable() + .init_expr(Instruction::I64Const(val)) + .build() + } + } + + // Add function pointer table + if let Some(table) = def.table { + contract = contract + .table() + .with_min(table.num_elements) + .with_max(Some(table.num_elements)) + .with_element(0, vec![table.function_index; table.num_elements as usize]) + .build(); + } + + let mut code = contract.build(); + + // Inject stack height metering + if def.inject_stack_metering { + code = inject_limiter( + code, + Contracts::::current_schedule().limits.stack_height + ) + .unwrap(); + } + + let code = code.to_bytes().unwrap(); let hash = T::Hashing::hash(&code); Self { code, - hash + hash, + memory: def.memory, } } } -impl WasmModule { +impl WasmModule +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ + /// Creates a wasm module with an empty `call` and `deploy` function and nothing else. pub fn dummy() -> Self { ModuleDefinition::default().into() } + /// Same as `dummy` but with maximum sized linear memory. + pub fn dummy_with_mem() -> Self { + ModuleDefinition { + memory: Some(ImportedMemory::max::()), + .. Default::default() + } + .into() + } + + /// Creates a wasm module of `target_bytes` size. Used to benchmark the performance of + /// `instantiate_with_code` for different sizes of wasm modules. The generated module maximizes + /// instrumentation runtime by nesting blocks as deeply as possible given the byte budget. pub fn sized(target_bytes: u32) -> Self { use parity_wasm::elements::Instruction::{If, I32Const, Return, End}; - // Base size of a contract is 47 bytes and each expansion adds 6 bytes. + // Base size of a contract is 63 bytes and each expansion adds 6 bytes. // We do one expansion less to account for the code section and function body // size fields inside the binary wasm module representation which are leb128 encoded // and therefore grow in size when the contract grows. We are not allowed to overshoot - // because of the maximum code size that is enforced by `put_code`. - let expansions = (target_bytes.saturating_sub(47) / 6).saturating_sub(1); + // because of the maximum code size that is enforced by `instantiate_with_code`. + let expansions = (target_bytes.saturating_sub(63) / 6).saturating_sub(1); const EXPANSION: [Instruction; 4] = [ I32Const(0), If(BlockType::NoResult), @@ -166,11 +263,15 @@ impl WasmModule { ]; ModuleDefinition { call_body: Some(body::repeated(expansions, &EXPANSION)), + memory: Some(ImportedMemory::max::()), .. Default::default() } .into() } + /// Creates a wasm module that calls the imported function named `getter_name` `repeat` + /// times. The imported function is expected to have the "getter signature" of + /// (out_ptr: u32, len_ptr: u32) -> (). 
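To make the byte accounting in `sized` above concrete (an illustrative calculation only, using the 63-byte base and 6-byte expansion stated in its comment):

    // Number of 6-byte expansions emitted for a requested module size. One expansion
    // is held back because the leb128-encoded section and body length fields grow
    // with the module and must not push it over the enforced maximum code size.
    fn expansions(target_bytes: u32) -> u32 {
        (target_bytes.saturating_sub(63) / 6).saturating_sub(1)
    }
    // expansions(1024) == 159, i.e. roughly 63 + 159 * 6 = 1017 bytes before the
    // length fields grow.

The `getter` constructor below follows the same declarative pattern.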
pub fn getter(getter_name: &'static str, repeat: u32) -> Self { let pages = max_pages::(); ModuleDefinition { @@ -198,11 +299,14 @@ impl WasmModule { .into() } + /// Creates a wasm module that calls the imported hash function named `name` `repeat` times + /// with an input of size `data_size`. Hash functions have the signature + /// (input_ptr: u32, input_len: u32, output_ptr: u32) -> () pub fn hasher(name: &'static str, repeat: u32, data_size: u32) -> Self { ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - name: name, + name, params: vec![ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, }], @@ -216,16 +320,84 @@ impl WasmModule { } .into() } + + /// Creates a memory instance for use in a sandbox with dimensions declared in this module + /// and adds it to `env`. A reference to that memory is returned so that it can be used to + /// access the memory contents from the supervisor. + pub fn add_memory(&self, env: &mut EnvironmentDefinitionBuilder) -> Option { + let memory = if let Some(memory) = &self.memory { + memory + } else { + return None; + }; + let memory = Memory::new(memory.min_pages, Some(memory.max_pages)).unwrap(); + env.add_memory("env", "memory", memory.clone()); + Some(memory) + } + + pub fn unary_instr(instr: Instruction, repeat: u32) -> Self { + use body::DynInstr::{RandomI64Repeated, Regular}; + ModuleDefinition { + call_body: Some(body::repeated_dyn(repeat, vec![ + RandomI64Repeated(1), + Regular(instr), + Regular(Instruction::Drop), + ])), + .. Default::default() + }.into() + } + + pub fn binary_instr(instr: Instruction, repeat: u32) -> Self { + use body::DynInstr::{RandomI64Repeated, Regular}; + ModuleDefinition { + call_body: Some(body::repeated_dyn(repeat, vec![ + RandomI64Repeated(2), + Regular(instr), + Regular(Instruction::Drop), + ])), + .. Default::default() + }.into() + } } -/// Mechanisms to create a function body that can be used inside a `ModuleDefinition`. +/// Mechanisms to generate a function body that can be used inside a `ModuleDefinition`. pub mod body { use super::*; - pub enum CountedInstruction { - // (offset, increment_by) - Counter(u32, u32), + /// When generating contract code by repeating a wasm sequence, it's sometimes necessary + /// to change those instructions on each repetition. The variants of this enum describe + /// various ways in which this can happen. + pub enum DynInstr { + /// Insert the associated instruction. Regular(Instruction), + /// Insert a I32Const with incrementing value for each insertion. + /// (start_at, increment_by) + Counter(u32, u32), + /// Insert a I32Const with a random value in [low, high) not divisible by two. + /// (low, high) + RandomUnaligned(u32, u32), + /// Insert a I32Const with a random value in [low, high). + /// (low, high) + RandomI32(i32, i32), + /// Insert the specified amount of I32Const with a random value. + RandomI32Repeated(usize), + /// Insert the specified amount of I64Const with a random value. + RandomI64Repeated(usize), + /// Insert a GetLocal with a random offset in [low, high). + /// (low, high) + RandomGetLocal(u32, u32), + /// Insert a SetLocal with a random offset in [low, high). + /// (low, high) + RandomSetLocal(u32, u32), + /// Insert a TeeLocal with a random offset in [low, high). + /// (low, high) + RandomTeeLocal(u32, u32), + /// Insert a GetGlobal with a random offset in [low, high). + /// (low, high) + RandomGetGlobal(u32, u32), + /// Insert a SetGlobal with a random offset in [low, high). 
+ /// (low, high) + RandomSetGlobal(u32, u32) } pub fn plain(instructions: Vec) -> FuncBody { @@ -245,28 +417,77 @@ pub mod body { FuncBody::new(Vec::new(), instructions) } - pub fn counted(repetitions: u32, mut instructions: Vec) -> FuncBody { + pub fn repeated_dyn(repetitions: u32, mut instructions: Vec) -> FuncBody { + use rand::{prelude::*, distributions::Standard}; + + // We do not need to be secure here. + let mut rng = rand_pcg::Pcg32::seed_from_u64(8446744073709551615); + // We need to iterate over indices because we cannot cycle over mutable references let body = (0..instructions.len()) .cycle() .take(instructions.len() * usize::try_from(repetitions).unwrap()) - .map(|idx| { + .flat_map(|idx| match &mut instructions[idx] { - CountedInstruction::Counter(offset, increment_by) => { + DynInstr::Regular(instruction) => vec![instruction.clone()], + DynInstr::Counter(offset, increment_by) => { let current = *offset; *offset += *increment_by; - Instruction::I32Const(current as i32) + vec![Instruction::I32Const(current as i32)] + }, + DynInstr::RandomUnaligned(low, high) => { + let unaligned = rng.gen_range(*low, *high) | 1; + vec![Instruction::I32Const(unaligned as i32)] + }, + DynInstr::RandomI32(low, high) => { + vec![Instruction::I32Const(rng.gen_range(*low, *high))] + }, + DynInstr::RandomI32Repeated(num) => { + (&mut rng).sample_iter(Standard).take(*num).map(|val| + Instruction::I32Const(val) + ) + .collect() + }, + DynInstr::RandomI64Repeated(num) => { + (&mut rng).sample_iter(Standard).take(*num).map(|val| + Instruction::I64Const(val) + ) + .collect() + }, + DynInstr::RandomGetLocal(low, high) => { + vec![Instruction::GetLocal(rng.gen_range(*low, *high))] + }, + DynInstr::RandomSetLocal(low, high) => { + vec![Instruction::SetLocal(rng.gen_range(*low, *high))] + }, + DynInstr::RandomTeeLocal(low, high) => { + vec![Instruction::TeeLocal(rng.gen_range(*low, *high))] + }, + DynInstr::RandomGetGlobal(low, high) => { + vec![Instruction::GetGlobal(rng.gen_range(*low, *high))] + }, + DynInstr::RandomSetGlobal(low, high) => { + vec![Instruction::SetGlobal(rng.gen_range(*low, *high))] }, - CountedInstruction::Regular(instruction) => instruction.clone(), } - }) + ) .chain(sp_std::iter::once(Instruction::End)) .collect(); FuncBody::new(Vec::new(), Instructions::new(body)) } + + /// Replace the locals of the supplied `body` with `num` i64 locals. + pub fn inject_locals(body: &mut FuncBody, num: u32) { + use parity_wasm::elements::Local; + *body.locals_mut() = (0..num).map(|i| Local::new(i, ValueType::I64)).collect() + } } /// The maximum amount of pages any contract is allowed to have according to the current `Schedule`. -pub fn max_pages() -> u32 { - Contracts::::current_schedule().max_memory_pages +pub fn max_pages() -> u32 +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ + Contracts::::current_schedule().limits.memory_pages } diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 79863afc4419a..a5dcc40d71ba6 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,27 +20,37 @@ #![cfg(feature = "runtime-benchmarks")] mod code; - -use crate::*; -use crate::Module as Contracts; -use crate::exec::StorageKey; -use crate::schedule::API_BENCHMARK_BATCH_SIZE; -use self::code::{ - body, ModuleDefinition, DataSegment, ImportedMemory, ImportedFunction, WasmModule, +mod sandbox; + +use crate::{ + *, Module as Contracts, + exec::StorageKey, + rent::Rent, + schedule::{API_BENCHMARK_BATCH_SIZE, INSTR_BENCHMARK_BATCH_SIZE}, + storage::Storage, +}; +use self::{ + code::{ + body::{self, DynInstr::*}, + ModuleDefinition, DataSegment, ImportedMemory, ImportedFunction, WasmModule, + }, + sandbox::Sandbox, }; - use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use frame_system::{Module as System, RawOrigin}; use parity_wasm::elements::{Instruction, ValueType, BlockType}; -use sp_runtime::traits::{Hash, Bounded}; -use sp_std::{default::Default, convert::{TryInto}}; +use sp_runtime::traits::{Hash, Bounded, Zero}; +use sp_std::{default::Default, convert::{TryInto}, vec::Vec, vec}; use pallet_contracts_primitives::RentProjection; /// How many batches we do per API benchmark. const API_BENCHMARK_BATCHES: u32 = 20; +/// How many batches we do per Instruction benchmark. +const INSTR_BENCHMARK_BATCHES: u32 = 1; + /// An instantiated and deployed contract. -struct Contract { +struct Contract { caller: T::AccountId, account_id: T::AccountId, addr: ::Source, @@ -62,12 +72,16 @@ impl Endow { /// The maximum amount of balance a caller can transfer without being brought below /// the existential deposit. This assumes that every caller is funded with the amount /// returned by `caller_funding`. - fn max() -> BalanceOf { + fn max() -> BalanceOf { caller_funding::().saturating_sub(T::Currency::minimum_balance()) } } -impl Contract { +impl Contract +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ /// Create new contract and use a default account id as instantiator. fn new( module: WasmModule, @@ -95,35 +109,42 @@ impl Contract { endowment: Endow, ) -> Result, &'static str> { - use sp_runtime::traits::{CheckedDiv, SaturatedConversion}; let (storage_size, endowment) = match endowment { Endow::CollectRent => { // storage_size cannot be zero because otherwise a contract that is just above // the subsistence threshold does not pay rent given a large enough subsistence // threshold. But we need rent payments to occur in order to benchmark for worst cases. - let storage_size = Config::::subsistence_threshold_uncached() - .checked_div(&T::RentDepositOffset::get()) - .unwrap_or_else(Zero::zero); + let storage_size = u32::max_value() / 10; // Endowment should be large but not as large to inhibit rent payments. 
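The replacement endowment computation just below is easier to follow with numbers plugged in (a sketch with invented parameter values; `DepositPerStorageByte` and `DepositPerContract` are the configuration constants the new code reads):

    // Fund the contract with the deposit for only half of its storage so that rent
    // keeps being collected during the benchmark. The parameter values are placeholders.
    fn endowment(storage_size: u64, deposit_per_byte: u64, deposit_per_contract: u64) -> u64 {
        deposit_per_byte
            .saturating_mul(storage_size / 2)
            .saturating_add(deposit_per_contract)
    }
    // With storage_size = u32::MAX as u64 / 10 and deposit_per_byte = 1 this yields
    // roughly 2.15e8 plus the per-contract deposit, about half of what covering the
    // full storage would require.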
- let endowment = T::RentDepositOffset::get() - .saturating_mul(storage_size + T::StorageSizeOffset::get().into()) - .saturating_sub(1u32.into()); + // Balance will only cover half the storage + let endowment = T::DepositPerStorageByte::get() + .saturating_mul(>::from(storage_size) / 2u32.into()) + .saturating_add(T::DepositPerContract::get()); (storage_size, endowment) }, Endow::Max => (0u32.into(), Endow::max::()), }; T::Currency::make_free_balance_be(&caller, caller_funding::()); - let addr = T::DetermineContractAddress::contract_address_for(&module.hash, &data, &caller); - init_block_number::(); - Contracts::::put_code_raw(module.code)?; + let salt = vec![0xff]; + let addr = Contracts::::contract_address(&caller, &module.hash, &salt); + + // The default block number is zero. The benchmarking system bumps the block number + // to one for the benchmarking closure when it is set to zero. In order to prevent this + // undesired implicit bump (which messes with rent collection), we do the bump ourselves + // in the setup closure so that both the instantiate and subsequent call are run with the + // same block number. + System::::set_block_number(1u32.into()); + + Contracts::::store_code_raw(module.code)?; Contracts::::instantiate( RawOrigin::Signed(caller.clone()).into(), endowment, Weight::max_value(), module.hash, data, + salt, )?; let result = Contract { @@ -135,7 +156,7 @@ impl Contract { }; let mut contract = result.alive_info()?; - contract.storage_size = storage_size.saturated_into::(); + contract.storage_size = storage_size; ContractInfoOf::::insert(&result.account_id, ContractInfo::Alive(contract)); Ok(result) @@ -145,7 +166,7 @@ impl Contract { fn store(&self, items: &Vec<(StorageKey, Vec)>) -> Result<(), &'static str> { let info = self.alive_info()?; for item in items { - crate::storage::write_contract_storage::( + Storage::::write( &self.account_id, &info.trie_id, &item.0, @@ -177,7 +198,7 @@ impl Contract { /// Get the block number when this contract will be evicted. Returns an error when /// the rent collection won't happen because the contract has to much endowment. fn eviction_at(&self) -> Result { - let projection = crate::rent::compute_rent_projection::(&self.account_id) + let projection = Rent::>::compute_projection(&self.account_id) .map_err(|_| "Invalid acc for rent")?; match projection { RentProjection::EvictionAt(at) => Ok(at), @@ -186,37 +207,56 @@ impl Contract { } } -/// A `Contract` that was evicted after accumulating some storage. +/// A `Contract` that contains some storage items. /// -/// This is used to benchmark contract resurrection. -struct Tombstone { +/// This is used to benchmark contract destruction and resurection. Those operations' +/// weight depend on the amount of storage accumulated. +struct ContractWithStorage { /// The contract that was evicted. contract: Contract, /// The storage the contract held when it was avicted. storage: Vec<(StorageKey, Vec)>, } -impl Tombstone { - /// Create and evict a new contract with the supplied storage item count and size each. +impl ContractWithStorage +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ + /// Same as [`Self::with_code`] but with dummy contract code. fn new(stor_num: u32, stor_size: u32) -> Result { - let contract = Contract::::new(WasmModule::dummy(), vec![], Endow::CollectRent)?; + Self::with_code(WasmModule::dummy(), stor_num, stor_size) + } + + /// Create and evict a new contract with the supplied storage item count and size each. 
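For orientation, this is how the helper defined below is exercised by the restore-to benchmarks later in the diff (a usage sketch assembled from lines that appear further down, not new code):

    // Build a contract holding 10 maximum-size storage items, then advance the block
    // number past its eviction point and evict it, leaving a tombstone whose trie
    // can be restored by the benchmarked contract.
    let mut tombstone = ContractWithStorage::<T>::new(10, T::MaxValueSize::get())?;
    tombstone.evict()?;
    let dest = tombstone.contract.account_id.encode();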
+ fn with_code(code: WasmModule, stor_num: u32, stor_size: u32) -> Result { + let contract = Contract::::new(code, vec![], Endow::CollectRent)?; let storage_items = create_storage::(stor_num, stor_size)?; contract.store(&storage_items)?; - System::::set_block_number( - contract.eviction_at()? + T::SignedClaimHandicap::get() + 5u32.into() - ); - crate::rent::collect_rent::(&contract.account_id); - contract.ensure_tombstone()?; - - Ok(Tombstone { + Ok(Self { contract, storage: storage_items, }) } + + /// Increase the system block number so that this contract is eligible for eviction. + fn set_block_num_for_eviction(&self) -> Result<(), &'static str> { + System::::set_block_number( + self.contract.eviction_at()? + T::SignedClaimHandicap::get() + 5u32.into() + ); + Ok(()) + } + + /// Evict this contract. + fn evict(&mut self) -> Result<(), &'static str> { + self.set_block_num_for_eviction()?; + Rent::>::try_eviction(&self.contract.account_id, Zero::zero())?; + self.contract.ensure_tombstone() + } } /// Generate `stor_num` storage items. Each has the size `stor_size`. -fn create_storage( +fn create_storage( stor_num: u32, stor_size: u32 ) -> Result)>, &'static str> { @@ -230,23 +270,38 @@ fn create_storage( } /// The funding that each account that either calls or instantiates contracts is funded with. -fn caller_funding() -> BalanceOf { +fn caller_funding() -> BalanceOf { BalanceOf::::max_value() / 2u32.into() } -/// Set the block number to one. -/// -/// The default block number is zero. The benchmarking system bumps the block number -/// to one for the benchmarking closure when it is set to zero. In order to prevent this -/// undesired implicit bump (which messes with rent collection), wo do the bump ourselfs -/// in the setup closure so that both the instantiate and subsequent call are run with the -/// same block number. -fn init_block_number() { - System::::set_block_number(1u32.into()); -} - benchmarks! { - _ { + where_clause { where + T::AccountId: UncheckedFrom, + T::AccountId: AsRef<[u8]>, + } + + // The base weight without any actual work performed apart from the setup costs. + on_initialize {}: { + Storage::::process_deletion_queue_batch(Weight::max_value()) + } + + on_initialize_per_trie_key { + let k in 0..1024; + let instance = ContractWithStorage::::new(k, T::MaxValueSize::get())?; + Storage::::queue_trie_for_deletion(&instance.contract.alive_info()?)?; + }: { + Storage::::process_deletion_queue_batch(Weight::max_value()) + } + + on_initialize_per_queue_item { + let q in 0..1024.min(T::DeletionQueueDepth::get()); + for i in 0 .. q { + let instance = Contract::::with_index(i, WasmModule::dummy(), vec![], Endow::Max)?; + Storage::::queue_trie_for_deletion(&instance.alive_info()?)?; + ContractInfoOf::::remove(instance.account_id); + } + }: { + Storage::::process_deletion_queue_batch(Weight::max_value()) } // This extrinsic is pretty much constant as it is only a simple setter. @@ -259,30 +314,43 @@ benchmarks! { // This constructs a contract that is maximal expensive to instrument. // It creates a maximum number of metering blocks per byte. - // `n`: Size of the code in kilobytes. - put_code { - let n in 0 .. Contracts::::current_schedule().max_code_size / 1024; + // The size of the salt influences the runtime because is is hashed in order to + // determine the contract address. + // `c`: Size of the code in kilobytes. + // `s`: Size of the salt in kilobytes. + instantiate_with_code { + let c in 0 .. Contracts::::current_schedule().limits.code_size / 1024; + let s in 0 .. 
code::max_pages::() * 64; + let salt = vec![42u8; (s * 1024) as usize]; + let endowment = caller_funding::() / 3u32.into(); let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, caller_funding::()); - let module = WasmModule::::sized(n * 1024); - let origin = RawOrigin::Signed(caller); - }: _(origin, module.code) + let WasmModule { code, hash, .. } = WasmModule::::sized(c * 1024); + let origin = RawOrigin::Signed(caller.clone()); + let addr = Contracts::::contract_address(&caller, &hash, &salt); + }: _(origin, endowment, Weight::max_value(), code, vec![], salt) + verify { + // endowment was removed from the caller + assert_eq!(T::Currency::free_balance(&caller), caller_funding::() - endowment); + // contract has the full endowment because no rent collection happended + assert_eq!(T::Currency::free_balance(&addr), endowment); + // instantiate should leave a alive contract + Contract::::address_alive_info(&addr)?; + } // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. - // The size of the input data influences the runtime because it is hashed in order to determine - // the contract address. - // `n`: Size of the data passed to constructor in kilobytes. + // `s`: Size of the salt in kilobytes. instantiate { - let n in 0 .. code::max_pages::() * 64; - let data = vec![42u8; (n * 1024) as usize]; - let endowment = Config::::subsistence_threshold_uncached(); + let s in 0 .. code::max_pages::() * 64; + let salt = vec![42u8; (s * 1024) as usize]; + let endowment = caller_funding::() / 3u32.into(); let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, caller_funding::()); - let WasmModule { code, hash } = WasmModule::::dummy(); + let WasmModule { code, hash, .. } = WasmModule::::dummy_with_mem(); let origin = RawOrigin::Signed(caller.clone()); - let addr = T::DetermineContractAddress::contract_address_for(&hash, &data, &caller); - Contracts::::put_code_raw(code)?; - }: _(origin, endowment, Weight::max_value(), hash, data) + let addr = Contracts::::contract_address(&caller, &hash, &salt); + Contracts::::store_code_raw(code)?; + }: _(origin, endowment, Weight::max_value(), hash, vec![], salt) verify { // endowment was removed from the caller assert_eq!(T::Currency::free_balance(&caller), caller_funding::() - endowment); @@ -300,7 +368,7 @@ benchmarks! { call { let data = vec![42u8; 1024]; let instance = Contract::::with_caller( - whitelisted_caller(), WasmModule::dummy(), vec![], Endow::CollectRent + whitelisted_caller(), WasmModule::dummy_with_mem(), vec![], Endow::CollectRent )?; let value = T::Currency::minimum_balance() * 100u32.into(); let origin = RawOrigin::Signed(instance.caller.clone()); @@ -348,9 +416,15 @@ benchmarks! { instance.ensure_tombstone()?; // the caller should get the reward for being a good snitch - assert_eq!( - T::Currency::free_balance(&instance.caller), - caller_funding::() - instance.endowment + ::SurchargeReward::get(), + // this is capped by the maximum amount of rent payed. So we only now that it should + // have increased by at most the surcharge reward. + assert!( + T::Currency::free_balance(&instance.caller) > + caller_funding::() - instance.endowment + ); + assert!( + T::Currency::free_balance(&instance.caller) <= + caller_funding::() - instance.endowment + ::SurchargeReward::get(), ); } @@ -626,7 +700,8 @@ benchmarks! { // Restore just moves the trie id from origin to destination and therefore // does not depend on the size of the destination contract. 
However, to not // trigger any edge case we won't use an empty contract as destination. - let tombstone = Tombstone::::new(10, T::MaxValueSize::get())?; + let mut tombstone = ContractWithStorage::::new(10, T::MaxValueSize::get())?; + tombstone.evict()?; let dest = tombstone.contract.account_id.encode(); let dest_len = dest.len(); @@ -699,7 +774,8 @@ benchmarks! { seal_restore_to_per_delta { let d in 0 .. API_BENCHMARK_BATCHES; - let tombstone = Tombstone::::new(0, 0)?; + let mut tombstone = ContractWithStorage::::new(0, 0)?; + tombstone.evict()?; let delta = create_storage::(d * API_BENCHMARK_BATCH_SIZE, T::MaxValueSize::get())?; let dest = tombstone.contract.account_id.encode(); @@ -783,7 +859,7 @@ benchmarks! { seal_random { let r in 0 .. API_BENCHMARK_BATCHES; let pages = code::max_pages::(); - let subject_len = Contracts::::current_schedule().max_subject_len; + let subject_len = Contracts::::current_schedule().limits.subject_len; assert!(subject_len < 1024); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), @@ -839,14 +915,13 @@ benchmarks! { // `t`: Number of topics // `n`: Size of event payload in kb seal_deposit_event_per_topic_and_kb { - let t in 0 .. Contracts::::current_schedule().max_event_topics; + let t in 0 .. Contracts::::current_schedule().limits.event_topics; let n in 0 .. T::MaxValueSize::get() / 1024; let mut topics = (0..API_BENCHMARK_BATCH_SIZE) .map(|n| (n * t..n * t + t).map(|i| T::Hashing::hash_of(&i)).collect::>().encode()) .peekable(); let topics_len = topics.peek().map(|i| i.len()).unwrap_or(0); let topics = topics.flatten().collect(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -860,7 +935,7 @@ benchmarks! { value: topics, }, ], - call_body: Some(body::counted(API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ Counter(0, topics_len as u32), // topics_ptr Regular(Instruction::I32Const(topics_len as i32)), // topics_len Regular(Instruction::I32Const(0)), // data_ptr @@ -911,7 +986,6 @@ benchmarks! { .flat_map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) .collect::>(); let key_len = sp_std::mem::size_of::<::Output>(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -925,7 +999,7 @@ benchmarks! { value: keys, }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(0, key_len as u32), // key_ptr Regular(Instruction::I32Const(0)), // value_ptr Regular(Instruction::I32Const(0)), // value_len @@ -976,7 +1050,6 @@ benchmarks! { .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let key_len = sp_std::mem::size_of::<::Output>(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -990,7 +1063,7 @@ benchmarks! { value: key_bytes, }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(0, key_len as u32), Regular(Instruction::Call(0)), ])), @@ -999,7 +1072,7 @@ benchmarks! 
{ let instance = Contract::::new(code, vec![], Endow::Max)?; let trie_id = instance.alive_info()?.trie_id; for key in keys { - crate::storage::write_contract_storage::( + Storage::::write( &instance.account_id, &trie_id, key.as_slice().try_into().map_err(|e| "Key has wrong length")?, @@ -1019,7 +1092,6 @@ benchmarks! { let key_len = sp_std::mem::size_of::<::Output>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let key_bytes_len = key_bytes.len(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1033,7 +1105,7 @@ benchmarks! { value: key_bytes, }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(0, key_len as u32), // key_ptr Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr @@ -1045,7 +1117,7 @@ benchmarks! { let instance = Contract::::new(code, vec![], Endow::Max)?; let trie_id = instance.alive_info()?.trie_id; for key in keys { - crate::storage::write_contract_storage::( + Storage::::write( &instance.account_id, &trie_id, key.as_slice().try_into().map_err(|e| "Key has wrong length")?, @@ -1089,7 +1161,7 @@ benchmarks! { }); let instance = Contract::::new(code, vec![], Endow::Max)?; let trie_id = instance.alive_info()?.trie_id; - crate::storage::write_contract_storage::( + Storage::::write( &instance.account_id, &trie_id, key.as_slice().try_into().map_err(|e| "Key has wrong length")?, @@ -1107,11 +1179,10 @@ benchmarks! { .collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); let account_bytes = accounts.iter().flat_map(|x| x.encode()).collect(); - let value = Config::::subsistence_threshold_uncached(); + let value = Contracts::::subsistence_threshold(); assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1129,7 +1200,7 @@ benchmarks! { value: account_bytes, }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(value_len as u32, account_len as u32), // account_ptr Regular(Instruction::I32Const(account_len as i32)), // account_len Regular(Instruction::I32Const(0)), // value_ptr @@ -1154,7 +1225,7 @@ benchmarks! { // We call unique accounts. seal_call { let r in 0 .. API_BENCHMARK_BATCHES; - let dummy_code = WasmModule::::dummy(); + let dummy_code = WasmModule::::dummy_with_mem(); let callees = (0..r * API_BENCHMARK_BATCH_SIZE) .map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![], Endow::Max)) .collect::, _>>()?; @@ -1163,7 +1234,6 @@ benchmarks! { let value: BalanceOf = 0u32.into(); let value_bytes = value.encode(); let value_len = value_bytes.len(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1191,7 +1261,7 @@ benchmarks! 
{ value: callee_bytes, }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(value_len as u32, callee_len as u32), // callee_ptr Regular(Instruction::I32Const(callee_len as i32)), // callee_len Regular(Instruction::I64Const(0)), // gas @@ -1243,7 +1313,6 @@ benchmarks! { let value: BalanceOf = t.into(); let value_bytes = value.encode(); let value_len = value_bytes.len(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1275,7 +1344,7 @@ benchmarks! { value: (o * 1024).to_le_bytes().into(), }, ], - call_body: Some(body::counted(API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ Counter(value_len as u32, callee_len as u32), // callee_ptr Regular(Instruction::I32Const(callee_len as i32)), // callee_len Regular(Instruction::I64Const(0)), // gas @@ -1300,21 +1369,24 @@ benchmarks! { let hashes = (0..r * API_BENCHMARK_BATCH_SIZE) .map(|i| { let code = WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory::max::()), call_body: Some(body::plain(vec![ + // we need to add this in order to make contracts unique + // so that they can be deployed from the same sender Instruction::I32Const(i as i32), Instruction::Drop, Instruction::End, ])), .. Default::default() }); - Contracts::::put_code_raw(code.code)?; + Contracts::::store_code_raw(code.code)?; Ok(code.hash) }) .collect::, &'static str>>()?; let hash_len = hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); let hashes_bytes = hashes.iter().flat_map(|x| x.encode()).collect::>(); let hashes_len = hashes_bytes.len(); - let value = Config::::subsistence_threshold_uncached(); + let value = Endow::max::() / (r * API_BENCHMARK_BATCH_SIZE + 2).into(); assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); @@ -1326,7 +1398,6 @@ benchmarks! { let addr_len_offset = hashes_offset + hashes_len; let addr_offset = addr_len_offset + addr_len; - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1342,7 +1413,9 @@ benchmarks! { ValueType::I32, ValueType::I32, ValueType::I32, - ValueType::I32 + ValueType::I32, + ValueType::I32, + ValueType::I32, ], return_type: Some(ValueType::I32), }], @@ -1360,7 +1433,7 @@ benchmarks! { value: addr_len.to_le_bytes().into(), }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr Regular(Instruction::I32Const(hash_len as i32)), // code_hash_len Regular(Instruction::I64Const(0)), // gas @@ -1372,6 +1445,8 @@ benchmarks! { Regular(Instruction::I32Const(addr_len_offset as i32)), // address_len_ptr Regular(Instruction::I32Const(u32::max_value() as i32)), // output_ptr Regular(Instruction::I32Const(0)), // output_len_ptr + Regular(Instruction::I32Const(0)), // salt_ptr + Regular(Instruction::I32Const(0)), // salt_ptr_len Regular(Instruction::Call(0)), Regular(Instruction::Drop), ])), @@ -1382,8 +1457,8 @@ benchmarks! 
{ let callee = instance.addr.clone(); let addresses = hashes .iter() - .map(|hash| T::DetermineContractAddress::contract_address_for( - hash, &[], &instance.account_id + .map(|hash| Contracts::::contract_address( + &instance.account_id, hash, &[], )) .collect::>(); @@ -1395,13 +1470,15 @@ benchmarks! { }: call(origin, callee, 0u32.into(), Weight::max_value(), vec![]) verify { for addr in &addresses { - instance.alive_info()?; + ContractInfoOf::::get(&addr).and_then(|c| c.get_alive()) + .ok_or_else(|| "Contract should have been instantiated")?; } } - seal_instantiate_per_input_output_kb { + seal_instantiate_per_input_output_salt_kb { let i in 0 .. (code::max_pages::() - 1) * 64; let o in 0 .. (code::max_pages::() - 1) * 64; + let s in 0 .. (code::max_pages::() - 1) * 64; let callee_code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1425,12 +1502,12 @@ benchmarks! { let hash = callee_code.hash.clone(); let hash_bytes = callee_code.hash.encode(); let hash_len = hash_bytes.len(); - Contracts::::put_code_raw(callee_code.code)?; + Contracts::::store_code_raw(callee_code.code)?; let inputs = (0..API_BENCHMARK_BATCH_SIZE).map(|x| x.encode()).collect::>(); let input_len = inputs.get(0).map(|x| x.len()).unwrap_or(0); let input_bytes = inputs.iter().cloned().flatten().collect::>(); let inputs_len = input_bytes.len(); - let value = Config::::subsistence_threshold_uncached(); + let value = Endow::max::() / (API_BENCHMARK_BATCH_SIZE + 2).into(); assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); @@ -1444,7 +1521,6 @@ benchmarks! { let output_len_offset = addr_len_offset + 4; let output_offset = output_len_offset + 4; - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1460,7 +1536,9 @@ benchmarks! { ValueType::I32, ValueType::I32, ValueType::I32, - ValueType::I32 + ValueType::I32, + ValueType::I32, + ValueType::I32, ], return_type: Some(ValueType::I32), }], @@ -1486,7 +1564,7 @@ benchmarks! { value: (o * 1024).to_le_bytes().into(), }, ], - call_body: Some(body::counted(API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ Regular(Instruction::I32Const(hash_offset as i32)), // code_hash_ptr Regular(Instruction::I32Const(hash_len as i32)), // code_hash_len Regular(Instruction::I64Const(0)), // gas @@ -1498,6 +1576,8 @@ benchmarks! { Regular(Instruction::I32Const(addr_len_offset as i32)), // address_len_ptr Regular(Instruction::I32Const(output_offset as i32)), // output_ptr Regular(Instruction::I32Const(output_len_offset as i32)), // output_len_ptr + Counter(input_offset as u32, input_len as u32), // salt_ptr + Regular(Instruction::I32Const((s * 1024).max(input_len as u32) as i32)), // salt_len Regular(Instruction::Call(0)), Regular(Instruction::I32Eqz), Regular(Instruction::If(BlockType::NoResult)), @@ -1583,6 +1663,781 @@ benchmarks! { ), vec![], Endow::Max)?; let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) + + // We make the assumption that pushing a constant and dropping a value takes roughly + // the same amount of time. We follow that `t.load` and `drop` both have the weight + // of this benchmark / 2. 
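To make the cost model in this comment concrete (a worked example with invented benchmark results; the real values come out of the benchmarking runs, and the derivation continues in the comment below):

    // `instr_i64const` executes pairs of (i64.const, drop), so each of the two is
    // assigned half of the measured per-pair cost; that shared value is `w_param`.
    // Benchmarks for other instructions then subtract one `w_param` per operand they
    // push and per result they drop, e.g. two for `i64.load`.
    fn derive(w_bench_i64const: u64, w_bench_i64load: u64) -> (u64, u64) {
        let w_param = w_bench_i64const / 2;            // w_i64const = w_drop
        let w_i64load = w_bench_i64load - 2 * w_param; // address pushed, result dropped
        (w_param, w_i64load)
    }
    // derive(3_000, 7_000) == (1_500, 4_000)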
We need to make this assumption because there is no way + // to measure them on their own using a valid wasm module. We need their individual + // values to derive the weight of individual instructions (by substraction) from + // benchmarks that include those for parameter pushing and return type dropping. + // We call the weight of `t.load` and `drop`: `w_param`. + // The weight that would result from the respective benchmark we call: `w_bench`. + // + // w_i{32,64}const = w_drop = w_bench / 2 + instr_i64const { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(1), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_i{32,64}load = w_bench - 2 * w_param + instr_i64load { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory::max::()), + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomUnaligned(0, code::max_pages::() * 64 * 1024 - 8), + Regular(Instruction::I64Load(3, 0)), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_i{32,64}store{...} = w_bench - 2 * w_param + instr_i64store { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory::max::()), + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomUnaligned(0, code::max_pages::() * 64 * 1024 - 8), + RandomI64Repeated(1), + Regular(Instruction::I64Store(3, 0)), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_select = w_bench - 4 * w_param + instr_select { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(1), + RandomI64Repeated(1), + RandomI32(0, 2), + Regular(Instruction::Select), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_if = w_bench - 3 * w_param + instr_if { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI32(0, 2), + Regular(Instruction::If(BlockType::Value(ValueType::I64))), + RandomI64Repeated(1), + Regular(Instruction::Else), + RandomI64Repeated(1), + Regular(Instruction::End), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_br = w_bench - 2 * w_param + instr_br { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Br(1)), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + ])), + .. 
Default::default() + })); + }: { + sbox.invoke(); + } + + // w_br_if = w_bench - 5 * w_param + // The two additional pushes + drop are only executed 50% of the time. + // Making it: 3 * w_param + (50% * 4 * w_param) + instr_br_if { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + RandomI32(0, 2), + Regular(Instruction::BrIf(1)), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_br_table = w_bench - 3 * w_param + // 1 * w_param + 0.5 * 2 * w_param + 0.25 * 4 * w_param + instr_br_table { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let table = Box::new(parity_wasm::elements::BrTableData { + table: Box::new([0, 1, 2]), + default: 1, + }); + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + RandomI32(0, 4), + Regular(Instruction::BrTable(table)), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_br_table_per_entry = w_bench + instr_br_table_per_entry { + let e in 1 .. Contracts::::current_schedule().limits.br_table_size; + let entry: Vec = [0, 1].iter() + .cloned() + .cycle() + .take((e / 2) as usize).collect(); + let table = Box::new(parity_wasm::elements::BrTableData { + table: entry.into_boxed_slice(), + default: 0, + }); + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(INSTR_BENCHMARK_BATCH_SIZE, vec![ + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + RandomI32(0, (e + 1) as i32), // Make sure the default entry is also used + Regular(Instruction::BrTable(table)), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_call = w_bench - 2 * w_param + instr_call { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + // We need to make use of the stack here in order to trigger stack height + // instrumentation. + aux_body: Some(body::plain(vec![ + Instruction::I64Const(42), + Instruction::Drop, + Instruction::End, + ])), + call_body: Some(body::repeated(r * INSTR_BENCHMARK_BATCH_SIZE, &[ + Instruction::Call(2), // call aux + ])), + inject_stack_metering: true, + .. 
Default::default() + })); + }: { + sbox.invoke(); + } + + // w_call_indrect = w_bench - 3 * w_param + instr_call_indirect { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let num_elements = Contracts::::current_schedule().limits.table_size; + use self::code::TableSegment; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + // We need to make use of the stack here in order to trigger stack height + // instrumentation. + aux_body: Some(body::plain(vec![ + Instruction::I64Const(42), + Instruction::Drop, + Instruction::End, + ])), + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI32(0, num_elements as i32), + Regular(Instruction::CallIndirect(0, 0)), // we only have one sig: 0 + ])), + inject_stack_metering: true, + table: Some(TableSegment { + num_elements, + function_index: 2, // aux + }), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_instr_call_indirect_per_param = w_bench - 1 * w_param + // Calling a function indirectly causes it to go through a thunk function whose runtime + // linearly depend on the amount of parameters to this function. + // Please note that this is not necessary with a direct call. + instr_call_indirect_per_param { + let p in 0 .. Contracts::::current_schedule().limits.parameters; + let num_elements = Contracts::::current_schedule().limits.table_size; + use self::code::TableSegment; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + // We need to make use of the stack here in order to trigger stack height + // instrumentation. + aux_body: Some(body::plain(vec![ + Instruction::I64Const(42), + Instruction::Drop, + Instruction::End, + ])), + aux_arg_num: p, + call_body: Some(body::repeated_dyn(INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(p as usize), + RandomI32(0, num_elements as i32), + Regular(Instruction::CallIndirect(p.min(1), 0)), // aux signature: 1 or 0 + ])), + inject_stack_metering: true, + table: Some(TableSegment { + num_elements, + function_index: 2, // aux + }), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_local_get = w_bench - 1 * w_param + instr_local_get { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let max_locals = Contracts::::current_schedule().limits.stack_height; + let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomGetLocal(0, max_locals), + Regular(Instruction::Drop), + ]); + body::inject_locals(&mut call_body, max_locals); + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(call_body), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_local_set = w_bench - 1 * w_param + instr_local_set { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let max_locals = Contracts::::current_schedule().limits.stack_height; + let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(1), + RandomSetLocal(0, max_locals), + ]); + body::inject_locals(&mut call_body, max_locals); + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(call_body), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_local_tee = w_bench - 2 * w_param + instr_local_tee { + let r in 0 .. 
INSTR_BENCHMARK_BATCHES; + let max_locals = Contracts::::current_schedule().limits.stack_height; + let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(1), + RandomTeeLocal(0, max_locals), + Regular(Instruction::Drop), + ]); + body::inject_locals(&mut call_body, max_locals); + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(call_body), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_global_get = w_bench - 1 * w_param + instr_global_get { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let max_globals = Contracts::::current_schedule().limits.globals; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomGetGlobal(0, max_globals), + Regular(Instruction::Drop), + ])), + num_globals: max_globals, + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_global_set = w_bench - 1 * w_param + instr_global_set { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let max_globals = Contracts::::current_schedule().limits.globals; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(1), + RandomSetGlobal(0, max_globals), + ])), + num_globals: max_globals, + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_memory_get = w_bench - 1 * w_param + instr_memory_current { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory::max::()), + call_body: Some(body::repeated(r * INSTR_BENCHMARK_BATCH_SIZE, &[ + Instruction::CurrentMemory(0), + Instruction::Drop + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_memory_grow = w_bench - 2 * w_param + // We can only allow allocate as much memory as it is allowed in a a contract. + // Therefore the repeat count is limited by the maximum memory any contract can have. + // Using a contract with more memory will skew the benchmark because the runtime of grow + // depends on how much memory is already allocated. + instr_memory_grow { + let r in 0 .. 1; + let max_pages = ImportedMemory::max::().max_pages; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory { + min_pages: 0, + max_pages, + }), + call_body: Some(body::repeated(r * max_pages, &[ + Instruction::I32Const(1), + Instruction::GrowMemory(0), + Instruction::Drop, + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // Unary numeric instructions. + // All use w = w_bench - 2 * w_param. + + instr_i64clz { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::unary_instr( + Instruction::I64Clz, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64ctz { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::unary_instr( + Instruction::I64Ctz, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64popcnt { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::unary_instr( + Instruction::I64Popcnt, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64eqz { + let r in 0 .. 
INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::unary_instr( + Instruction::I64Eqz, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64extendsi32 { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI32Repeated(1), + Regular(Instruction::I64ExtendSI32), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + instr_i64extendui32 { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI32Repeated(1), + Regular(Instruction::I64ExtendUI32), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + instr_i32wrapi64 { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::unary_instr( + Instruction::I32WrapI64, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + // Binary numeric instructions. + // All use w = w_bench - 3 * w_param. + + instr_i64eq { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64Eq, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64ne { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64Ne, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64lts { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64LtS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64ltu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64LtU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64gts { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64GtS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64gtu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64GtU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64les { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64LeS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64leu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64LeU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64ges { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64GeS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64geu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64GeU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64add { + let r in 0 .. 
INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64Add, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64sub { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64Sub, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64mul { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64Mul, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64divs { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64DivS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64divu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64DivU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64rems { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64RemS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64remu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64RemU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64and { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64And, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64or { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64Or, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64xor { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64Xor, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64shl { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64Shl, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64shrs { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64ShrS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64shru { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64ShrU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64rotl { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64Rotl, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64rotr { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::binary_instr( + Instruction::I64Rotr, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + // This is no benchmark. It merely exist to have an easy way to pretty print the curently + // configured `Schedule` during benchmark development. 
+ // It can be outputed using the following command: + // cargo run --manifest-path=bin/node/cli/Cargo.toml --release \ + // --features runtime-benchmarks -- benchmark --dev --execution=native \ + // -p pallet_contracts -e print_schedule --no-median-slopes --no-min-squares + #[extra] + print_schedule { + #[cfg(feature = "std")] + { + let weight_per_key = T::WeightInfo::on_initialize_per_trie_key(1) - + T::WeightInfo::on_initialize_per_trie_key(0); + let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) - + T::WeightInfo::on_initialize_per_queue_item(0); + let weight_limit = T::DeletionWeightLimit::get(); + let queue_depth: u64 = T::DeletionQueueDepth::get().into(); + println!("{:#?}", Schedule::::default()); + println!("###############################################"); + println!("Lazy deletion throughput per block (empty queue, full queue): {}, {}", + weight_limit / weight_per_key, + (weight_limit - weight_per_queue_item * queue_depth) / weight_per_key, + ); + } + #[cfg(not(feature = "std"))] + return Err("Run this bench with a native runtime in order to see the schedule."); + }: {} } #[cfg(test)] @@ -1605,11 +2460,16 @@ mod tests { } } + create_test!(on_initialize); + create_test!(on_initialize_per_trie_key); + create_test!(on_initialize_per_queue_item); + create_test!(update_schedule); - create_test!(put_code); + create_test!(instantiate_with_code); create_test!(instantiate); create_test!(call); create_test!(claim_surcharge); + create_test!(seal_caller); create_test!(seal_address); create_test!(seal_gas_left); @@ -1640,6 +2500,8 @@ mod tests { create_test!(seal_transfer); create_test!(seal_call); create_test!(seal_call_per_transfer_input_output_kb); + create_test!(seal_instantiate); + create_test!(seal_instantiate_per_input_output_salt_kb); create_test!(seal_clear_storage); create_test!(seal_hash_sha2_256); create_test!(seal_hash_sha2_256_per_kb); @@ -1649,4 +2511,56 @@ mod tests { create_test!(seal_hash_blake2_256_per_kb); create_test!(seal_hash_blake2_128); create_test!(seal_hash_blake2_128_per_kb); + + create_test!(instr_i64const); + create_test!(instr_i64load); + create_test!(instr_i64store); + create_test!(instr_select); + create_test!(instr_if); + create_test!(instr_br); + create_test!(instr_br_if); + create_test!(instr_br_table); + create_test!(instr_br_table_per_entry); + create_test!(instr_call); + create_test!(instr_call_indirect); + create_test!(instr_call_indirect_per_param); + create_test!(instr_local_get); + create_test!(instr_local_set); + create_test!(instr_local_tee); + create_test!(instr_global_get); + create_test!(instr_global_set); + create_test!(instr_memory_current); + create_test!(instr_memory_grow); + create_test!(instr_i64clz); + create_test!(instr_i64ctz); + create_test!(instr_i64popcnt); + create_test!(instr_i64eqz); + create_test!(instr_i64extendsi32); + create_test!(instr_i64extendui32); + create_test!(instr_i32wrapi64); + create_test!(instr_i64eq); + create_test!(instr_i64ne); + create_test!(instr_i64lts); + create_test!(instr_i64ltu); + create_test!(instr_i64gts); + create_test!(instr_i64gtu); + create_test!(instr_i64les); + create_test!(instr_i64leu); + create_test!(instr_i64ges); + create_test!(instr_i64geu); + create_test!(instr_i64add); + create_test!(instr_i64sub); + create_test!(instr_i64mul); + create_test!(instr_i64divs); + create_test!(instr_i64divu); + create_test!(instr_i64rems); + create_test!(instr_i64remu); + create_test!(instr_i64and); + create_test!(instr_i64or); + create_test!(instr_i64xor); + 
create_test!(instr_i64shl);
+    create_test!(instr_i64shrs);
+    create_test!(instr_i64shru);
+    create_test!(instr_i64rotl);
+    create_test!(instr_i64rotr);
 }
diff --git a/frame/contracts/src/benchmarking/sandbox.rs b/frame/contracts/src/benchmarking/sandbox.rs
new file mode 100644
index 0000000000000..a97fcc2b113ec
--- /dev/null
+++ b/frame/contracts/src/benchmarking/sandbox.rs
@@ -0,0 +1,59 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! For instruction benchmarking we do not instantiate a full contract but merely the
+//! sandbox to execute the wasm code. This is because we do not need the full
+//! environment that provides the seal interface as imported functions.
+
+use super::{
+    Config,
+    code::WasmModule,
+};
+use sp_core::crypto::UncheckedFrom;
+use sp_sandbox::{EnvironmentDefinitionBuilder, Instance, Memory};
+
+/// Minimal execution environment without any exported functions.
+pub struct Sandbox {
+    instance: Instance<()>,
+    _memory: Option<Memory>,
+}
+
+impl Sandbox {
+    /// Invoke the `call` function of a contract code and panic on any execution error.
+    pub fn invoke(&mut self) {
+        self.instance.invoke("call", &[], &mut ()).unwrap();
+    }
+}
+
+impl<T> From<&WasmModule<T>> for Sandbox
+where
+    T: Config,
+    T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
+{
+    /// Creates an instance from the supplied module and supplies as much memory
+    /// to the instance as the module declares as imported.
+    fn from(module: &WasmModule<T>) -> Self {
+        let mut env_builder = EnvironmentDefinitionBuilder::new();
+        let memory = module.add_memory(&mut env_builder);
+        let instance = Instance::new(&module.code, &env_builder, &mut ())
+            .expect("Failed to create benchmarking Sandbox instance");
+        Self {
+            instance,
+            _memory: memory,
+        }
+    }
+}
diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs
new file mode 100644
index 0000000000000..ef6e034791753
--- /dev/null
+++ b/frame/contracts/src/chain_extension.rs
@@ -0,0 +1,395 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! A mechanism for runtime authors to augment the functionality of contracts.
+//!
+//! The runtime is able to call into any contract and retrieve the result using
+//! [`bare_call`](crate::Module::bare_call). This already allows customization of runtime
+//! behaviour by user generated code (contracts). However, often it is more straightforward
+//! to allow the reverse behaviour: The contract calls into the runtime. We call the latter
+//! one a "chain extension" because it allows the chain to extend the set of functions that are
+//! callable by a contract.
+//!
+//! In order to create a chain extension the runtime author implements the [`ChainExtension`]
+//! trait and declares it in this pallet's [configuration trait](crate::Config). All types
+//! required for this endeavour are defined or re-exported in this module. There is an
+//! implementation on `()` which can be used to signal that no chain extension is available.
+//!
+//! # Security
+//!
+//! The chain author alone is responsible for the security of the chain extension.
+//! This includes avoiding the exposure of exploitable functions and charging the
+//! appropriate amount of weight. In order to do so benchmarks must be written and the
+//! [`charge_weight`](Environment::charge_weight) function must be called **before**
+//! carrying out any action that causes the consumption of the chargeable weight.
+//! It cannot be overstated how delicate a process the creation of a chain extension
+//! is. Check whether using [`bare_call`](crate::Module::bare_call) suffices for the
+//! use case at hand.
+//!
+//! # Benchmarking
+//!
+//! The builtin contract callable functions that pallet-contracts provides all have
+//! benchmarks that determine the correct weight that an invocation of these functions
+//! induces. In order to be able to charge the correct weight for the functions defined
+//! by a chain extension benchmarks must be written, too. In the near future this crate
+//! will provide the means for easier creation of those specialized benchmarks.
+
+use crate::{
+    Error,
+    wasm::{Runtime, RuntimeToken},
+};
+use codec::Decode;
+use frame_support::weights::Weight;
+use sp_runtime::DispatchError;
+use sp_std::{
+    marker::PhantomData,
+    vec::Vec,
+};
+
+pub use frame_system::Config as SysConfig;
+pub use pallet_contracts_primitives::ReturnFlags;
+pub use sp_core::crypto::UncheckedFrom;
+pub use crate::{Config, exec::Ext};
+pub use state::Init as InitState;
+
+/// Result that returns a [`DispatchError`] on error.
+pub type Result<T> = sp_std::result::Result<T, DispatchError>;
+
+/// A trait used to extend the set of contract callable functions.
+///
+/// In order to create a custom chain extension this trait must be implemented and supplied
+/// to the pallet contracts configuration trait as the associated type of the same name.
+/// Consult the [module documentation](self) for a general explanation of chain extensions.
+pub trait ChainExtension {
+    /// Call the chain extension logic.
+    ///
+    /// This is the only function that needs to be implemented in order to write a
+    /// chain extension. It is called whenever a contract calls the `seal_call_chain_extension`
+    /// imported wasm function.
+    ///
+    /// # Parameters
+    /// - `func_id`: The first argument to `seal_call_chain_extension`. Usually used to
+    ///   determine which function to realize.
+    /// - `env`: Access to the remaining arguments and the execution environment.
+    ///
+    /// # Return
+    ///
+    /// In case of `Err` the contract execution is immediately suspended and the passed error
+    /// is returned to the caller. Otherwise the value of [`RetVal`] determines the exit
+    /// behaviour.
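To make the workflow described above concrete, here is a minimal, hedged sketch of a chain extension as a runtime author might write it. It assumes the module is exposed as `pallet_contracts::chain_extension` and uses only the types documented in this file; the struct name, the `func_id` value, and the charged weight are illustrative and not part of the pallet.

use pallet_contracts::chain_extension::{
    ChainExtension, Environment, Ext, InitState, Result, RetVal, SysConfig, UncheckedFrom,
};

/// Illustrative extension exposing a single function that echoes its input back.
pub struct EchoExtension;

impl ChainExtension for EchoExtension {
    fn call<E>(func_id: u32, env: Environment<E, InitState>) -> Result<RetVal>
    where
        E: Ext,
        <E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>,
    {
        match func_id {
            1 => {
                // Interpret the remaining arguments as "buffer in, buffer out".
                let mut env = env.buf_in_buf_out();
                // Charge a (made up) benchmarked weight *before* doing any chargeable work.
                env.charge_weight(100_000)?;
                // Read at most 32 bytes of the contract supplied input ...
                let input = env.read(32)?;
                // ... and copy them into the contract supplied output buffer.
                env.write(&input, false, None)?;
                // Hand the status code `0` back to the calling contract.
                Ok(RetVal::Converging(0))
            }
            // Unknown function ids abort the contract execution.
            _ => Err("unknown chain extension function".into()),
        }
    }
}

The runtime would then supply `EchoExtension` through the `ChainExtension` associated type mentioned above; returning `RetVal::Diverging { .. }` instead of `Converging` would end execution as if the contract had called `seal_return`.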
+ fn call(func_id: u32, env: Environment) -> Result + where + E: Ext, + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>; + + /// Determines whether chain extensions are enabled for this chain. + /// + /// The default implementation returns `true`. Therefore it is not necessary to overwrite + /// this function when implementing a chain extension. In case of `false` the deployment of + /// a contract that references `seal_call_chain_extension` will be denied and calling this + /// function will return [`NoChainExtension`](Error::NoChainExtension) without first calling + /// into [`call`](Self::call). + fn enabled() -> bool { + true + } +} + +/// Implementation that indicates that no chain extension is available. +impl ChainExtension for () { + fn call(_func_id: u32, mut _env: Environment) -> Result + where + E: Ext, + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, + { + // Never called since [`Self::enabled()`] is set to `false`. Because we want to + // avoid panics at all costs we supply a sensible error value here instead + // of an `unimplemented!`. + Err(Error::::NoChainExtension.into()) + } + + fn enabled() -> bool { + false + } +} + +/// Determines the exit behaviour and return value of a chain extension. +pub enum RetVal { + /// The chain extensions returns the supplied value to its calling contract. + Converging(u32), + /// The control does **not** return to the calling contract. + /// + /// Use this to stop the execution of the contract when the chain extension returns. + /// The semantic is the same as for calling `seal_return`: The control returns to + /// the caller of the currently executing contract yielding the supplied buffer and + /// flags. + Diverging{flags: ReturnFlags, data: Vec}, +} + +/// Grants the chain extension access to its parameters and execution environment. +/// +/// It uses the typestate pattern to enforce the correct usage of the parameters passed +/// to the chain extension. +pub struct Environment<'a, 'b, E: Ext, S: state::State> { + /// The actual data of this type. + inner: Inner<'a, 'b, E>, + /// `S` is only used in the type system but never as value. + phantom: PhantomData, +} + +/// Functions that are available in every state of this type. +impl<'a, 'b, E: Ext, S: state::State> Environment<'a, 'b, E, S> +where + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, +{ + /// Charge the passed `amount` of weight from the overall limit. + /// + /// It returns `Ok` when there the remaining weight budget is larger than the passed + /// `weight`. It returns `Err` otherwise. In this case the chain extension should + /// abort the execution and pass through the error. + /// + /// # Note + /// + /// Weight is synonymous with gas in substrate. + pub fn charge_weight(&mut self, amount: Weight) -> Result<()> { + self.inner.runtime.charge_gas(RuntimeToken::ChainExtension(amount)).map(|_| ()) + } + + /// Grants access to the execution environment of the current contract call. + /// + /// Consult the functions on the returned type before re-implementing those functions. + pub fn ext(&mut self) -> &mut E { + self.inner.runtime.ext() + } +} + +/// Functions that are only available in the initial state of this type. +/// +/// Those are the functions that determine how the arguments to the chain extensions +/// should be consumed. +impl<'a, 'b, E: Ext> Environment<'a, 'b, E, state::Init> { + /// Creates a new environment for consumption by a chain extension. 
+ /// + /// It is only available to this crate because only the wasm runtime module needs to + /// ever create this type. Chain extensions merely consume it. + pub(crate) fn new( + runtime: &'a mut Runtime::<'b, E>, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + output_len_ptr: u32, + ) -> Self { + Environment { + inner: Inner { + runtime, + input_ptr, + input_len, + output_ptr, + output_len_ptr, + }, + phantom: PhantomData, + } + } + + /// Use all arguments as integer values. + pub fn only_in(self) -> Environment<'a, 'b, E, state::OnlyIn> { + Environment { + inner: self.inner, + phantom: PhantomData, + } + } + + /// Use input arguments as integer and output arguments as pointer to a buffer. + pub fn prim_in_buf_out(self) -> Environment<'a, 'b, E, state::PrimInBufOut> { + Environment { + inner: self.inner, + phantom: PhantomData, + } + } + + /// Use input and output arguments as pointers to a buffer. + pub fn buf_in_buf_out(self) -> Environment<'a, 'b, E, state::BufInBufOut> { + Environment { + inner: self.inner, + phantom: PhantomData, + } + } +} + +/// Functions to use the input arguments as integers. +impl<'a, 'b, E: Ext, S: state::PrimIn> Environment<'a, 'b, E, S> { + /// The `input_ptr` argument. + pub fn val0(&self) -> u32 { + self.inner.input_ptr + } + + /// The `input_len` argument. + pub fn val1(&self) -> u32 { + self.inner.input_len + } +} + +/// Functions to use the output arguments as integers. +impl<'a, 'b, E: Ext, S: state::PrimOut> Environment<'a, 'b, E, S> { + /// The `output_ptr` argument. + pub fn val2(&self) -> u32 { + self.inner.output_ptr + } + + /// The `output_len_ptr` argument. + pub fn val3(&self) -> u32 { + self.inner.output_len_ptr + } +} + +/// Functions to use the input arguments as pointer to a buffer. +impl<'a, 'b, E: Ext, S: state::BufIn> Environment<'a, 'b, E, S> +where + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, +{ + /// Reads `min(max_len, in_len)` from contract memory. + /// + /// This does **not** charge any weight. The caller must make sure that the an + /// appropriate amount of weight is charged **before** reading from contract memory. + /// The reason for that is that usually the costs for reading data and processing + /// said data cannot be separated in a benchmark. Therefore a chain extension would + /// charge the overall costs either using `max_len` (worst case approximation) or using + /// [`in_len()`](Self::in_len). + pub fn read(&self, max_len: u32) -> Result> { + self.inner.runtime.read_sandbox_memory( + self.inner.input_ptr, + self.inner.input_len.min(max_len), + ) + } + + /// Reads `min(buffer.len(), in_len) from contract memory. + /// + /// This takes a mutable pointer to a buffer fills it with data and shrinks it to + /// the size of the actual data. Apart from supporting pre-allocated buffers it is + /// equivalent to to [`read()`](Self::read). + pub fn read_into(&self, buffer: &mut &mut [u8]) -> Result<()> { + let len = buffer.len(); + let sliced = { + let buffer = core::mem::take(buffer); + &mut buffer[..len.min(self.inner.input_len as usize)] + }; + self.inner.runtime.read_sandbox_memory_into_buf( + self.inner.input_ptr, + sliced, + )?; + *buffer = sliced; + Ok(()) + } + + /// Reads `in_len` from contract memory and scale decodes it. + /// + /// This function is secure and recommended for all input types of fixed size + /// as long as the cost of reading the memory is included in the overall already charged + /// weight of the chain extension. This should usually be the case when fixed input types + /// are used. 
Non fixed size types (like everything using `Vec`) usually need to use + /// [`in_len()`](Self::in_len) in order to properly charge the necessary weight. + pub fn read_as(&mut self) -> Result { + self.inner.runtime.read_sandbox_memory_as( + self.inner.input_ptr, + self.inner.input_len, + ) + } + + /// The length of the input as passed in as `input_len`. + /// + /// A chain extension would use this value to calculate the dynamic part of its + /// weight. For example a chain extension that calculates the hash of some passed in + /// bytes would use `in_len` to charge the costs of hashing that amount of bytes. + /// This also subsumes the act of copying those bytes as a benchmarks measures both. + pub fn in_len(&self) -> u32 { + self.inner.input_len + } +} + +/// Functions to use the output arguments as pointer to a buffer. +impl<'a, 'b, E: Ext, S: state::BufOut> Environment<'a, 'b, E, S> +where + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, +{ + /// Write the supplied buffer to contract memory. + /// + /// If the contract supplied buffer is smaller than the passed `buffer` an `Err` is returned. + /// If `allow_skip` is set to true the contract is allowed to skip the copying of the buffer + /// by supplying the guard value of [`u32::max_value()`] as `out_ptr`. The + /// `weight_per_byte` is only charged when the write actually happens and is not skipped or + /// failed due to a too small output buffer. + pub fn write( + &mut self, + buffer: &[u8], + allow_skip: bool, + weight_per_byte: Option, + ) -> Result<()> { + self.inner.runtime.write_sandbox_output( + self.inner.output_ptr, + self.inner.output_len_ptr, + buffer, + allow_skip, + |len| { + weight_per_byte.map(|w| RuntimeToken::ChainExtension(w.saturating_mul(len.into()))) + }, + ) + } +} + +/// The actual data of an `Environment`. +/// +/// All data is put into this struct to easily pass it around as part of the typestate +/// pattern. Also it creates the opportunity to box this struct in the future in case it +/// gets too large. +struct Inner<'a, 'b, E: Ext> { + /// The runtime contains all necessary functions to interact with the running contract. + runtime: &'a mut Runtime::<'b, E>, + /// Verbatim argument passed to `seal_call_chain_extension`. + input_ptr: u32, + /// Verbatim argument passed to `seal_call_chain_extension`. + input_len: u32, + /// Verbatim argument passed to `seal_call_chain_extension`. + output_ptr: u32, + /// Verbatim argument passed to `seal_call_chain_extension`. + output_len_ptr: u32, +} + +/// Private submodule with public types to prevent other modules from naming them. +mod state { + pub trait State {} + + pub trait PrimIn: State {} + pub trait PrimOut: State {} + pub trait BufIn: State {} + pub trait BufOut: State {} + + pub enum Init {} + pub enum OnlyIn {} + pub enum PrimInBufOut {} + pub enum BufInBufOut {} + + impl State for Init {} + impl State for OnlyIn {} + impl State for PrimInBufOut {} + impl State for BufInBufOut {} + + impl PrimIn for OnlyIn {} + impl PrimOut for OnlyIn {} + impl PrimIn for PrimInBufOut {} + impl BufOut for PrimInBufOut {} + impl BufIn for BufInBufOut {} + impl BufOut for BufInBufOut {} +} diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index f93f262d821e9..bbb972b2ed2e2 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1,42 +1,47 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
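The private `state` module shown above is an instance of the typestate pattern that `Environment` relies on: zero-sized state types exist only at the type level and gate which accessors even compile. A small self-contained sketch of the same idea, with illustrative names unrelated to the pallet:

use core::marker::PhantomData;

mod state {
    pub trait State {}
    pub enum Init {}
    pub enum OnlyIn {}
    impl State for Init {}
    impl State for OnlyIn {}
}

struct Env<S: state::State> {
    a: u32,
    b: u32,
    _phantom: PhantomData<S>,
}

impl Env<state::Init> {
    fn new(a: u32, b: u32) -> Self {
        Env { a, b, _phantom: PhantomData }
    }

    /// Committing to an interpretation consumes the `Init` state ...
    fn only_in(self) -> Env<state::OnlyIn> {
        Env { a: self.a, b: self.b, _phantom: PhantomData }
    }
}

impl Env<state::OnlyIn> {
    /// ... and only afterwards are the state specific accessors available.
    fn val0(&self) -> u32 {
        self.a
    }

    fn val1(&self) -> u32 {
        self.b
    }
}

fn main() {
    let env = Env::new(1, 2);
    // `env.val0()` would not compile here: the accessor only exists in the `OnlyIn` state.
    let env = env.only_in();
    assert_eq!((env.val0(), env.val1()), (1, 2));
}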
-// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::{ - CodeHash, Config, ContractAddressFor, Event, RawEvent, Trait, - TrieId, BalanceOf, ContractInfo, TrieIdGenerator, - gas::GasMeter, rent, storage, Error, ContractInfoOf + CodeHash, Event, RawEvent, Config, Module as Contracts, + TrieId, BalanceOf, ContractInfo, gas::GasMeter, rent::Rent, storage::{self, Storage}, + Error, ContractInfoOf, Schedule, +}; +use sp_core::crypto::UncheckedFrom; +use sp_std::{ + prelude::*, + marker::PhantomData, }; -use sp_std::prelude::*; use sp_runtime::traits::{Bounded, Zero, Convert, Saturating}; use frame_support::{ - dispatch::DispatchError, - traits::{ExistenceRequirement, Currency, Time, Randomness}, + dispatch::{DispatchResult, DispatchError}, + traits::{ExistenceRequirement, Currency, Time, Randomness, Get}, weights::Weight, ensure, StorageMap, }; use pallet_contracts_primitives::{ErrorOrigin, ExecError, ExecReturnValue, ExecResult, ReturnFlags}; -pub type AccountIdOf = ::AccountId; -pub type MomentOf = <::Time as Time>::Moment; -pub type SeedOf = ::Hash; -pub type BlockNumberOf = ::BlockNumber; +pub type AccountIdOf = ::AccountId; +pub type MomentOf = <::Time as Time>::Moment; +pub type SeedOf = ::Hash; +pub type BlockNumberOf = ::BlockNumber; pub type StorageKey = [u8; 32]; /// A type that represents a topic of an event. At the moment a hash is used. -pub type TopicOf = ::Hash; +pub type TopicOf = ::Hash; /// Describes whether we deal with a contract or a plain account. pub enum TransactorKind { @@ -53,7 +58,7 @@ pub enum TransactorKind { /// This interface is specialized to an account of the executing code, so all /// operations are implicitly performed on that account. pub trait Ext { - type T: Trait; + type T: Config; /// Returns the storage entry of the executing account by the given `key`. /// @@ -63,7 +68,7 @@ pub trait Ext { /// Sets the storage entry by the given key to the specified value. If `value` is `None` then /// the storage entry is deleted. - fn set_storage(&mut self, key: StorageKey, value: Option>); + fn set_storage(&mut self, key: StorageKey, value: Option>) -> DispatchResult; /// Instantiate a contract from the given code. /// @@ -71,10 +76,11 @@ pub trait Ext { /// transferred from this to the newly created account (also known as endowment). 
fn instantiate( &mut self, - code: &CodeHash, + code: CodeHash, value: BalanceOf, gas_meter: &mut GasMeter, input_data: Vec, + salt: &[u8], ) -> Result<(AccountIdOf, ExecReturnValue), ExecError>; /// Transfer some amount of funds into the specified account. @@ -82,7 +88,7 @@ pub trait Ext { &mut self, to: &AccountIdOf, value: BalanceOf, - ) -> Result<(), DispatchError>; + ) -> DispatchResult; /// Transfer all funds to `beneficiary` and delete the contract. /// @@ -94,7 +100,7 @@ pub trait Ext { fn terminate( &mut self, beneficiary: &AccountIdOf, - ) -> Result<(), DispatchError>; + ) -> DispatchResult; /// Call (possibly transferring some amount of funds) into the specified account. fn call( @@ -118,7 +124,7 @@ pub trait Ext { code_hash: CodeHash, rent_allowance: BalanceOf, delta: Vec, - ) -> Result<(), &'static str>; + ) -> DispatchResult; /// Returns a reference to the account id of the caller. fn caller(&self) -> &AccountIdOf; @@ -165,90 +171,121 @@ pub trait Ext { /// Returns the price for the specified amount of weight. fn get_weight_price(&self, weight: Weight) -> BalanceOf; + + /// Get a reference to the schedule used by the current call. + fn schedule(&self) -> &Schedule; } -/// Loader is a companion of the `Vm` trait. It loads an appropriate abstract -/// executable to be executed by an accompanying `Vm` implementation. -pub trait Loader { - type Executable; - - /// Load the initializer portion of the code specified by the `code_hash`. This - /// executable is called upon instantiation. - fn load_init(&self, code_hash: &CodeHash) -> Result; - /// Load the main portion of the code specified by the `code_hash`. This executable - /// is called for each call to a contract. - fn load_main(&self, code_hash: &CodeHash) -> Result; +/// Describes the different functions that can be exported by an [`Executable`]. +pub enum ExportedFunction { + /// The constructor function which is executed on deployment of a contract. + Constructor, + /// The function which is executed when a contract is called. + Call, } -/// A trait that represent a virtual machine. -/// -/// You can view a virtual machine as something that takes code, an input data buffer, -/// queries it and/or performs actions on the given `Ext` and optionally -/// returns an output data buffer. The type of code depends on the particular virtual machine. +/// A trait that represents something that can be executed. /// -/// Execution of code can end by either implicit termination (that is, reached the end of -/// executable), explicit termination via returning a buffer or termination due to a trap. -pub trait Vm { - type Executable; +/// In the on-chain environment this would be represented by a wasm module. This trait exists in +/// order to be able to mock the wasm logic for testing. +pub trait Executable: Sized { + /// Load the executable from storage. + fn from_storage(code_hash: CodeHash, schedule: &Schedule) -> Result; + + /// Load the module from storage without re-instrumenting it. + /// + /// A code module is re-instrumented on-load when it was originally instrumented with + /// an older schedule. This skips this step for cases where the code storage is + /// queried for purposes other than execution. + fn from_storage_noinstr(code_hash: CodeHash) -> Result; + /// Decrements the refcount by one and deletes the code if it drops to zero. + fn drop_from_storage(self); + + /// Increment the refcount by one. Fails if the code does not exist on-chain. 
+ fn add_user(code_hash: CodeHash) -> DispatchResult; + + /// Decrement the refcount by one and remove the code when it drops to zero. + fn remove_user(code_hash: CodeHash); + + /// Execute the specified exported function and return the result. + /// + /// When the specified function is `Constructor` the executable is stored and its + /// refcount incremented. + /// + /// # Note + /// + /// This functions expects to be executed in a storage transaction that rolls back + /// all of its emitted storage changes. fn execute>( - &self, - exec: &Self::Executable, + self, ext: E, + function: &ExportedFunction, input_data: Vec, gas_meter: &mut GasMeter, ) -> ExecResult; + + /// The code hash of the executable. + fn code_hash(&self) -> &CodeHash; + + /// The storage that is occupied by the instrumented executable and its pristine source. + /// + /// The returned size is already divided by the number of users who share the code. + /// + /// # Note + /// + /// This works with the current in-memory value of refcount. When calling any contract + /// without refetching this from storage the result can be inaccurate as it might be + /// working with a stale value. Usually this inaccuracy is tolerable. + fn occupied_storage(&self) -> u32; } -pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { - pub caller: Option<&'a ExecutionContext<'a, T, V, L>>, +pub struct ExecutionContext<'a, T: Config + 'a, E> { + pub caller: Option<&'a ExecutionContext<'a, T, E>>, pub self_account: T::AccountId, pub self_trie_id: Option, pub depth: usize, - pub config: &'a Config, - pub vm: &'a V, - pub loader: &'a L, + pub schedule: &'a Schedule, pub timestamp: MomentOf, pub block_number: T::BlockNumber, + _phantom: PhantomData, } -impl<'a, T, E, V, L> ExecutionContext<'a, T, V, L> +impl<'a, T, E> ExecutionContext<'a, T, E> where - T: Trait, - L: Loader, - V: Vm, + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, + E: Executable, { /// Create the top level execution context. /// /// The specified `origin` address will be used as `sender` for. The `origin` must be a regular /// account (not a contract). - pub fn top_level(origin: T::AccountId, cfg: &'a Config, vm: &'a V, loader: &'a L) -> Self { + pub fn top_level(origin: T::AccountId, schedule: &'a Schedule) -> Self { ExecutionContext { caller: None, self_trie_id: None, self_account: origin, depth: 0, - config: &cfg, - vm: &vm, - loader: &loader, + schedule, timestamp: T::Time::now(), block_number: >::block_number(), + _phantom: Default::default(), } } fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: TrieId) - -> ExecutionContext<'b, T, V, L> + -> ExecutionContext<'b, T, E> { ExecutionContext { caller: Some(self), self_trie_id: Some(trie_id), self_account: dest, depth: self.depth + 1, - config: self.config, - vm: self.vm, - loader: self.loader, + schedule: self.schedule, timestamp: self.timestamp.clone(), block_number: self.block_number.clone(), + _phantom: Default::default(), } } @@ -260,41 +297,41 @@ where gas_meter: &mut GasMeter, input_data: Vec, ) -> ExecResult { - if self.depth == self.config.max_depth as usize { + if self.depth == T::MaxDepth::get() as usize { Err(Error::::MaxCallDepthReached)? } - // Assumption: `collect_rent` doesn't collide with overlay because - // `collect_rent` will be done on first call and destination contract and balance - // cannot be changed before the first call - // We do not allow 'calling' plain accounts. For transfering value - // `seal_transfer` must be used. 
- let contract = if let Some(ContractInfo::Alive(info)) = rent::collect_rent::(&dest) { - info - } else { - Err(Error::::NotCallable)? - }; + let contract = >::get(&dest) + .and_then(|contract| contract.get_alive()) + .ok_or(Error::::NotCallable)?; + + let executable = E::from_storage(contract.code_hash, &self.schedule)?; + + // This charges the rent and denies access to a contract that is in need of + // eviction by returning `None`. We cannot evict eagerly here because those + // changes would be rolled back in case this contract is called by another + // contract. + // See: https://github.com/paritytech/substrate/issues/6439#issuecomment-648754324 + let contract = Rent::::charge(&dest, contract, executable.occupied_storage())? + .ok_or(Error::::NotCallable)?; let transactor_kind = self.transactor_kind(); let caller = self.self_account.clone(); self.with_nested_context(dest.clone(), contract.trie_id.clone(), |nested| { if value > BalanceOf::::zero() { - transfer( + transfer::( TransferCause::Call, transactor_kind, &caller, &dest, value, - nested, )? } - let executable = nested.loader.load_main(&contract.code_hash) - .map_err(|_| Error::::CodeNotFound)?; - let output = nested.vm.execute( - &executable, + let output = executable.execute( nested.new_call_context(caller, value), + &ExportedFunction::Call, input_data, gas_meter, ).map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; @@ -306,68 +343,80 @@ where &mut self, endowment: BalanceOf, gas_meter: &mut GasMeter, - code_hash: &CodeHash, + executable: E, input_data: Vec, + salt: &[u8], ) -> Result<(T::AccountId, ExecReturnValue), ExecError> { - if self.depth == self.config.max_depth as usize { + if self.depth == T::MaxDepth::get() as usize { Err(Error::::MaxCallDepthReached)? } let transactor_kind = self.transactor_kind(); let caller = self.self_account.clone(); - let dest = T::DetermineContractAddress::contract_address_for( - code_hash, - &input_data, - &caller, - ); + let dest = Contracts::::contract_address(&caller, executable.code_hash(), salt); - // TrieId has not been generated yet and storage is empty since contract is new. - // - // Generate it now. - let dest_trie_id = ::TrieIdGenerator::trie_id(&dest); + let output = frame_support::storage::with_transaction(|| { + // Generate the trie id in a new transaction to only increment the counter on success. + let dest_trie_id = Storage::::generate_trie_id(&dest); - let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { - storage::place_contract::( - &dest, - nested - .self_trie_id - .clone() - .expect("the nested context always has to have self_trie_id"), - code_hash.clone() - )?; - - // Send funds unconditionally here. If the `endowment` is below existential_deposit - // then error will be returned here. - transfer( - TransferCause::Instantiate, - transactor_kind, - &caller, - &dest, - endowment, - nested, - )?; - - let executable = nested.loader.load_init(&code_hash) - .map_err(|_| Error::::CodeNotFound)?; - let output = nested.vm - .execute( - &executable, + let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { + Storage::::place_contract( + &dest, + nested + .self_trie_id + .clone() + .expect("the nested context always has to have self_trie_id"), + executable.code_hash().clone() + )?; + + // Send funds unconditionally here. If the `endowment` is below existential_deposit + // then error will be returned here. 
+ transfer::( + TransferCause::Instantiate, + transactor_kind, + &caller, + &dest, + endowment, + )?; + + // Cache the value before calling into the constructor because that + // consumes the value. If the constructor creates additional contracts using + // the same code hash we still charge the "1 block rent" as if they weren't + // spawned. This is OK as overcharging is always safe. + let occupied_storage = executable.occupied_storage(); + + let output = executable.execute( nested.new_call_context(caller.clone(), endowment), + &ExportedFunction::Constructor, input_data, gas_meter, ).map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; - // We need each contract that exists to be above the subsistence threshold - // in order to keep up the guarantuee that we always leave a tombstone behind - // with the exception of a contract that called `seal_terminate`. - if T::Currency::total_balance(&dest) < nested.config.subsistence_threshold() { - Err(Error::::NewContractNotFunded)? - } + // We need to re-fetch the contract because changes are written to storage + // eagerly during execution. + let contract = >::get(&dest) + .and_then(|contract| contract.get_alive()) + .ok_or(Error::::NotCallable)?; - // Deposit an instantiation event. - deposit_event::(vec![], RawEvent::Instantiated(caller.clone(), dest.clone())); + // Collect the rent for the first block to prevent the creation of very large + // contracts that never intended to pay for even one block. + // This also makes sure that it is above the subsistence threshold + // in order to keep up the guarantuee that we always leave a tombstone behind + // with the exception of a contract that called `seal_terminate`. + Rent::::charge(&dest, contract, occupied_storage)? + .ok_or(Error::::NewContractNotFunded)?; - Ok(output) + // Deposit an instantiation event. + deposit_event::(vec![], RawEvent::Instantiated(caller.clone(), dest.clone())); + + Ok(output) + }); + + use frame_support::storage::TransactionOutcome::*; + match output { + Ok(_) => Commit(output), + Err(_) => Rollback(output), + } })?; Ok((dest, output)) @@ -377,7 +426,7 @@ where &'b mut self, caller: T::AccountId, value: BalanceOf, - ) -> CallContext<'b, 'a, T, V, L> { + ) -> CallContext<'b, 'a, T, E> { let timestamp = self.timestamp.clone(); let block_number = self.block_number.clone(); CallContext { @@ -386,13 +435,14 @@ where value_transferred: value, timestamp, block_number, + _phantom: Default::default(), } } /// Execute the given closure within a nested execution context. fn with_nested_context(&mut self, dest: T::AccountId, trie_id: TrieId, func: F) -> ExecResult - where F: FnOnce(&mut ExecutionContext) -> ExecResult + where F: FnOnce(&mut ExecutionContext) -> ExecResult { use frame_support::storage::TransactionOutcome::*; let mut nested = self.nested(dest, trie_id); @@ -437,14 +487,16 @@ enum TransferCause { /// is specified as `Terminate`. Otherwise, any transfer that would bring the sender below the /// subsistence threshold (for contracts) or the existential deposit (for plain accounts) /// results in an error. 
-fn transfer<'a, T: Trait, V: Vm, L: Loader>( +fn transfer( cause: TransferCause, origin: TransactorKind, transactor: &T::AccountId, dest: &T::AccountId, value: BalanceOf, - ctx: &mut ExecutionContext<'a, T, V, L>, -) -> Result<(), DispatchError> { +) -> DispatchResult +where + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ use self::TransferCause::*; use self::TransactorKind::*; @@ -455,7 +507,7 @@ fn transfer<'a, T: Trait, V: Vm, L: Loader>( (_, Contract) => { ensure!( T::Currency::total_balance(transactor).saturating_sub(value) >= - ctx.config.subsistence_threshold(), + Contracts::::subsistence_threshold(), Error::::BelowSubsistenceThreshold, ); ExistenceRequirement::KeepAlive @@ -480,19 +532,20 @@ fn transfer<'a, T: Trait, V: Vm, L: Loader>( /// implies that the control won't be returned to the contract anymore, but there is still some code /// on the path of the return from that call context. Therefore, care must be taken in these /// situations. -struct CallContext<'a, 'b: 'a, T: Trait + 'b, V: Vm + 'b, L: Loader> { - ctx: &'a mut ExecutionContext<'b, T, V, L>, +struct CallContext<'a, 'b: 'a, T: Config + 'b, E> { + ctx: &'a mut ExecutionContext<'b, T, E>, caller: T::AccountId, value_transferred: BalanceOf, timestamp: MomentOf, block_number: T::BlockNumber, + _phantom: PhantomData, } -impl<'a, 'b: 'a, T, E, V, L> Ext for CallContext<'a, 'b, T, V, L> +impl<'a, 'b: 'a, T, E> Ext for CallContext<'a, 'b, T, E> where - T: Trait + 'b, - V: Vm, - L: Loader, + T: Config + 'b, + T::AccountId: UncheckedFrom + AsRef<[u8]>, + E: Executable, { type T = T; @@ -503,81 +556,81 @@ where expect can't fail;\ qed", ); - storage::read_contract_storage(trie_id, key) + Storage::::read(trie_id, key) } - fn set_storage(&mut self, key: StorageKey, value: Option>) { + fn set_storage(&mut self, key: StorageKey, value: Option>) -> DispatchResult { let trie_id = self.ctx.self_trie_id.as_ref().expect( "`ctx.self_trie_id` points to an alive contract within the `CallContext`;\ it cannot be `None`;\ expect can't fail;\ qed", ); - if let Err(storage::ContractAbsentError) = - storage::write_contract_storage::(&self.ctx.self_account, trie_id, &key, value) - { - panic!( - "the contract must be in the alive state within the `CallContext`;\ - the contract cannot be absent in storage; - write_contract_storage cannot return `None`; - qed" - ); - } + // write panics if the passed account is not alive. 
+ // the contract must be in the alive state within the `CallContext`;\ + // the contract cannot be absent in storage; + // write cannot return `None`; + // qed + Storage::::write(&self.ctx.self_account, trie_id, &key, value) } fn instantiate( &mut self, - code_hash: &CodeHash, + code_hash: CodeHash, endowment: BalanceOf, gas_meter: &mut GasMeter, input_data: Vec, + salt: &[u8], ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { - self.ctx.instantiate(endowment, gas_meter, code_hash, input_data) + let executable = E::from_storage(code_hash, &self.ctx.schedule)?; + let result = self.ctx.instantiate(endowment, gas_meter, executable, input_data, salt)?; + Ok(result) } fn transfer( &mut self, to: &T::AccountId, value: BalanceOf, - ) -> Result<(), DispatchError> { - transfer( + ) -> DispatchResult { + transfer::( TransferCause::Call, TransactorKind::Contract, &self.ctx.self_account.clone(), to, value, - self.ctx, ) } fn terminate( &mut self, beneficiary: &AccountIdOf, - ) -> Result<(), DispatchError> { + ) -> DispatchResult { let self_id = self.ctx.self_account.clone(); let value = T::Currency::free_balance(&self_id); if let Some(caller_ctx) = self.ctx.caller { if caller_ctx.is_live(&self_id) { - return Err(DispatchError::Other( - "Cannot terminate a contract that is present on the call stack", - )); + return Err(Error::::ReentranceDenied.into()); } } - transfer( + transfer::( TransferCause::Terminate, TransactorKind::Contract, &self_id, beneficiary, value, - self.ctx, )?; - let self_trie_id = self.ctx.self_trie_id.as_ref().expect( - "this function is only invoked by in the context of a contract;\ - a contract has a trie id;\ - this can't be None; qed", - ); - storage::destroy_contract::(&self_id, self_trie_id); - Ok(()) + if let Some(ContractInfo::Alive(info)) = ContractInfoOf::::take(&self_id) { + Storage::::queue_trie_for_deletion(&info)?; + E::remove_user(info.code_hash); + Contracts::::deposit_event(RawEvent::Terminated(self_id, beneficiary.clone())); + Ok(()) + } else { + panic!( + "this function is only invoked by in the context of a contract;\ + this contract is therefore alive;\ + qed" + ); + } } fn call( @@ -596,16 +649,14 @@ where code_hash: CodeHash, rent_allowance: BalanceOf, delta: Vec, - ) -> Result<(), &'static str> { + ) -> DispatchResult { if let Some(caller_ctx) = self.ctx.caller { if caller_ctx.is_live(&self.ctx.self_account) { - return Err( - "Cannot perform restoration of a contract that is present on the call stack", - ); + return Err(Error::::ReentranceDenied.into()); } } - let result = crate::rent::restore_to::( + let result = Rent::::restore_to( self.ctx.self_account.clone(), dest.clone(), code_hash.clone(), @@ -651,23 +702,23 @@ where } fn minimum_balance(&self) -> BalanceOf { - self.ctx.config.existential_deposit + T::Currency::minimum_balance() } fn tombstone_deposit(&self) -> BalanceOf { - self.ctx.config.tombstone_deposit + T::TombstoneDeposit::get() } fn deposit_event(&mut self, topics: Vec, data: Vec) { deposit_event::( topics, - RawEvent::ContractExecution(self.ctx.self_account.clone(), data) + RawEvent::ContractEmitted(self.ctx.self_account.clone(), data) ); } fn set_rent_allowance(&mut self, rent_allowance: BalanceOf) { if let Err(storage::ContractAbsentError) = - storage::set_rent_allowance::(&self.ctx.self_account, rent_allowance) + Storage::::set_rent_allowance(&self.ctx.self_account, rent_allowance) { panic!( "`self_account` points to an alive contract within the `CallContext`; @@ -677,28 +728,32 @@ where } fn rent_allowance(&self) -> BalanceOf { - 
storage::rent_allowance::(&self.ctx.self_account) + Storage::::rent_allowance(&self.ctx.self_account) .unwrap_or_else(|_| >::max_value()) // Must never be triggered actually } fn block_number(&self) -> T::BlockNumber { self.block_number } fn max_value_size(&self) -> u32 { - self.ctx.config.max_value_size + T::MaxValueSize::get() } fn get_weight_price(&self, weight: Weight) -> BalanceOf { T::WeightPrice::convert(weight) } + + fn schedule(&self) -> &Schedule { + &self.ctx.schedule + } } -fn deposit_event( +fn deposit_event( topics: Vec, event: Event, ) { >::deposit_event_indexed( &*topics, - ::Event::from(event).into(), + ::Event::from(event).into(), ) } @@ -709,32 +764,34 @@ fn deposit_event( /// wasm VM code. #[cfg(test)] mod tests { - use super::{ - BalanceOf, Event, ExecResult, ExecutionContext, Ext, Loader, - RawEvent, Vm, ReturnFlags, ExecError, ErrorOrigin - }; + use super::*; use crate::{ - gas::GasMeter, tests::{ExtBuilder, Test, MetaEvent}, - exec::ExecReturnValue, CodeHash, Config, + gas::GasMeter, tests::{ExtBuilder, Test, Event as MetaEvent}, gas::Gas, - storage, Error + storage::Storage, + tests::{ + ALICE, BOB, CHARLIE, + test_utils::{place_contract, set_balance, get_balance}, + }, + Error, }; - use crate::tests::test_utils::{place_contract, set_balance, get_balance}; use sp_runtime::DispatchError; use assert_matches::assert_matches; - use std::{cell::RefCell, collections::HashMap, marker::PhantomData, rc::Rc}; + use std::{cell::RefCell, collections::HashMap, rc::Rc}; - const ALICE: u64 = 1; - const BOB: u64 = 2; - const CHARLIE: u64 = 3; + type MockContext<'a> = ExecutionContext<'a, Test, MockExecutable>; const GAS_LIMIT: Gas = 10_000_000_000; + thread_local! { + static LOADER: RefCell = RefCell::new(MockLoader::default()); + } + fn events() -> Vec> { >::events() .into_iter() .filter_map(|meta| match meta.event { - MetaEvent::contracts(contract_event) => Some(contract_event), + MetaEvent::pallet_contracts(contract_event) => Some(contract_event), _ => None, }) .collect() @@ -747,80 +804,74 @@ mod tests { } #[derive(Clone)] - struct MockExecutable<'a>(Rc ExecResult + 'a>); - - impl<'a> MockExecutable<'a> { - fn new(f: impl Fn(MockCtx) -> ExecResult + 'a) -> Self { - MockExecutable(Rc::new(f)) - } - } + struct MockExecutable(Rc ExecResult + 'static>, CodeHash); - struct MockLoader<'a> { - map: HashMap, MockExecutable<'a>>, + #[derive(Default)] + struct MockLoader { + map: HashMap, MockExecutable>, counter: u64, } - impl<'a> MockLoader<'a> { - fn empty() -> Self { - MockLoader { - map: HashMap::new(), - counter: 0, - } - } - - fn insert(&mut self, f: impl Fn(MockCtx) -> ExecResult + 'a) -> CodeHash { - // Generate code hashes as monotonically increasing values. - let code_hash = ::Hash::from_low_u64_be(self.counter); - - self.counter += 1; - self.map.insert(code_hash, MockExecutable::new(f)); - code_hash + impl MockLoader { + fn insert(f: impl Fn(MockCtx) -> ExecResult + 'static) -> CodeHash { + LOADER.with(|loader| { + let mut loader = loader.borrow_mut(); + // Generate code hashes as monotonically increasing values. 
+ let hash = ::Hash::from_low_u64_be(loader.counter); + loader.counter += 1; + loader.map.insert(hash, MockExecutable (Rc::new(f), hash.clone())); + hash + }) } } - struct MockVm<'a> { - _marker: PhantomData<&'a ()>, - } + impl Executable for MockExecutable { + fn from_storage( + code_hash: CodeHash, + _schedule: &Schedule + ) -> Result { + Self::from_storage_noinstr(code_hash) + } - impl<'a> MockVm<'a> { - fn new() -> Self { - MockVm { _marker: PhantomData } + fn from_storage_noinstr(code_hash: CodeHash) -> Result { + LOADER.with(|loader| { + loader.borrow_mut() + .map + .get(&code_hash) + .cloned() + .ok_or(Error::::CodeNotFound.into()) + }) } - } - impl<'a> Loader for MockLoader<'a> { - type Executable = MockExecutable<'a>; + fn drop_from_storage(self) {} - fn load_init(&self, code_hash: &CodeHash) -> Result { - self.map - .get(code_hash) - .cloned() - .ok_or_else(|| "code not found") - } - fn load_main(&self, code_hash: &CodeHash) -> Result { - self.map - .get(code_hash) - .cloned() - .ok_or_else(|| "code not found") + fn add_user(_code_hash: CodeHash) -> DispatchResult { + Ok(()) } - } - impl<'a> Vm for MockVm<'a> { - type Executable = MockExecutable<'a>; + fn remove_user(_code_hash: CodeHash) {} fn execute>( - &self, - exec: &MockExecutable, + self, mut ext: E, + _function: &ExportedFunction, input_data: Vec, gas_meter: &mut GasMeter, ) -> ExecResult { - (exec.0)(MockCtx { + (self.0)(MockCtx { ext: &mut ext, input_data, gas_meter, }) } + + fn code_hash(&self) -> &CodeHash { + &self.1 + } + + fn occupied_storage(&self) -> u32 { + 0 + } } fn exec_success() -> ExecResult { @@ -829,32 +880,29 @@ mod tests { #[test] fn it_works() { + thread_local! { + static TEST_DATA: RefCell> = RefCell::new(vec![0]); + } + let value = Default::default(); let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let data = vec![]; - - let vm = MockVm::new(); - - let test_data = Rc::new(RefCell::new(vec![0usize])); - - let mut loader = MockLoader::empty(); - let exec_ch = loader.insert(|_ctx| { - test_data.borrow_mut().push(1); + let exec_ch = MockLoader::insert(|_ctx| { + TEST_DATA.with(|data| data.borrow_mut().push(1)); exec_success() }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(ALICE, &schedule); place_contract(&BOB, exec_ch); assert_matches!( - ctx.call(BOB, value, &mut gas_meter, data), + ctx.call(BOB, value, &mut gas_meter, vec![]), Ok(_) ); }); - assert_eq!(&*test_data.borrow(), &vec![0, 1]); + TEST_DATA.with(|data| assert_eq!(*data.borrow(), vec![0, 1])); } #[test] @@ -864,22 +912,16 @@ mod tests { let origin = ALICE; let dest = BOB; - let vm = MockVm::new(); - let loader = MockLoader::empty(); - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); set_balance(&origin, 100); set_balance(&dest, 0); - super::transfer( + super::transfer::( super::TransferCause::Call, super::TransactorKind::PlainAccount, &origin, &dest, 55, - &mut ctx, ).unwrap(); assert_eq!(get_balance(&origin), 45); @@ -894,21 +936,19 @@ mod tests { let origin = ALICE; let dest = BOB; - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let return_ch = loader.insert( + let return_ch = MockLoader::insert( |_| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Vec::new() }) ); ExtBuilder::default().build().execute_with(|| { - let cfg 
= Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(origin.clone(), &schedule); place_contract(&BOB, return_ch); set_balance(&origin, 100); - set_balance(&dest, 0); + let balance = get_balance(&dest); let output = ctx.call( - dest, + dest.clone(), 55, &mut GasMeter::::new(GAS_LIMIT), vec![], @@ -916,7 +956,9 @@ mod tests { assert!(!output.is_success()); assert_eq!(get_balance(&origin), 100); - assert_eq!(get_balance(&dest), 0); + + // the rent is still charged + assert!(get_balance(&dest) < balance); }); } @@ -927,21 +969,15 @@ mod tests { let origin = ALICE; let dest = BOB; - let vm = MockVm::new(); - let loader = MockLoader::empty(); - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); set_balance(&origin, 0); - let result = super::transfer( + let result = super::transfer::( super::TransferCause::Call, super::TransactorKind::PlainAccount, &origin, &dest, 100, - &mut ctx, ); assert_eq!( @@ -959,16 +995,13 @@ mod tests { // is returned from the execution context. let origin = ALICE; let dest = BOB; - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let return_ch = loader.insert( + let return_ch = MockLoader::insert( |_| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }) ); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(origin, &schedule); place_contract(&BOB, return_ch); let result = ctx.call( @@ -990,16 +1023,13 @@ mod tests { // is returned from the execution context. let origin = ALICE; let dest = BOB; - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let return_ch = loader.insert( + let return_ch = MockLoader::insert( |_| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1, 2, 3, 4] }) ); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(origin, &schedule); place_contract(&BOB, return_ch); let result = ctx.call( @@ -1017,17 +1047,15 @@ mod tests { #[test] fn input_data_to_call() { - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let input_data_ch = loader.insert(|ctx| { + let input_data_ch = MockLoader::insert(|ctx| { assert_eq!(ctx.input_data, &[1, 2, 3, 4]); exec_success() }); // This one tests passing the input data into a contract via call. ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(ALICE, &schedule); place_contract(&BOB, input_data_ch); let result = ctx.call( @@ -1042,25 +1070,25 @@ mod tests { #[test] fn input_data_to_instantiate() { - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let input_data_ch = loader.insert(|ctx| { + let input_data_ch = MockLoader::insert(|ctx| { assert_eq!(ctx.input_data, &[1, 2, 3, 4]); exec_success() }); // This one tests passing the input data into a contract via instantiate. 
ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let subsistence = Contracts::::subsistence_threshold(); + let mut ctx = MockContext::top_level(ALICE, &schedule); - set_balance(&ALICE, 100); + set_balance(&ALICE, subsistence * 10); let result = ctx.instantiate( - cfg.subsistence_threshold(), + subsistence * 3, &mut GasMeter::::new(GAS_LIMIT), - &input_data_ch, + MockExecutable::from_storage(input_data_ch, &schedule).unwrap(), vec![1, 2, 3, 4], + &[], ); assert_matches!(result, Ok(_)); }); @@ -1070,35 +1098,36 @@ mod tests { fn max_depth() { // This test verifies that when we reach the maximal depth creation of an // yet another context fails. + thread_local! { + static REACHED_BOTTOM: RefCell = RefCell::new(false); + } let value = Default::default(); - let reached_bottom = RefCell::new(false); - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let recurse_ch = loader.insert(|ctx| { + let recurse_ch = MockLoader::insert(|ctx| { // Try to call into yourself. let r = ctx.ext.call(&BOB, 0, ctx.gas_meter, vec![]); - let mut reached_bottom = reached_bottom.borrow_mut(); - if !*reached_bottom { - // We are first time here, it means we just reached bottom. - // Verify that we've got proper error and set `reached_bottom`. - assert_eq!( - r, - Err(Error::::MaxCallDepthReached.into()) - ); - *reached_bottom = true; - } else { - // We just unwinding stack here. - assert_matches!(r, Ok(_)); - } + REACHED_BOTTOM.with(|reached_bottom| { + let mut reached_bottom = reached_bottom.borrow_mut(); + if !*reached_bottom { + // We are first time here, it means we just reached bottom. + // Verify that we've got proper error and set `reached_bottom`. + assert_eq!( + r, + Err(Error::::MaxCallDepthReached.into()) + ); + *reached_bottom = true; + } else { + // We just unwinding stack here. + assert_matches!(r, Ok(_)); + } + }); exec_success() }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(ALICE, &schedule); set_balance(&BOB, 1); place_contract(&BOB, recurse_ch); @@ -1118,15 +1147,16 @@ mod tests { let origin = ALICE; let dest = BOB; - let vm = MockVm::new(); - - let witnessed_caller_bob = RefCell::new(None::); - let witnessed_caller_charlie = RefCell::new(None::); + thread_local! { + static WITNESSED_CALLER_BOB: RefCell>> = RefCell::new(None); + static WITNESSED_CALLER_CHARLIE: RefCell>> = RefCell::new(None); + } - let mut loader = MockLoader::empty(); - let bob_ch = loader.insert(|ctx| { + let bob_ch = MockLoader::insert(|ctx| { // Record the caller for bob. - *witnessed_caller_bob.borrow_mut() = Some(*ctx.ext.caller()); + WITNESSED_CALLER_BOB.with(|caller| + *caller.borrow_mut() = Some(ctx.ext.caller().clone()) + ); // Call into CHARLIE contract. assert_matches!( @@ -1135,21 +1165,22 @@ mod tests { ); exec_success() }); - let charlie_ch = loader.insert(|ctx| { + let charlie_ch = MockLoader::insert(|ctx| { // Record the caller for charlie. 
- *witnessed_caller_charlie.borrow_mut() = Some(*ctx.ext.caller()); + WITNESSED_CALLER_CHARLIE.with(|caller| + *caller.borrow_mut() = Some(ctx.ext.caller().clone()) + ); exec_success() }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(origin.clone(), &schedule); place_contract(&dest, bob_ch); place_contract(&CHARLIE, charlie_ch); let result = ctx.call( - dest, + dest.clone(), 0, &mut GasMeter::::new(GAS_LIMIT), vec![], @@ -1158,16 +1189,13 @@ mod tests { assert_matches!(result, Ok(_)); }); - assert_eq!(&*witnessed_caller_bob.borrow(), &Some(origin)); - assert_eq!(&*witnessed_caller_charlie.borrow(), &Some(dest)); + WITNESSED_CALLER_BOB.with(|caller| assert_eq!(*caller.borrow(), Some(origin))); + WITNESSED_CALLER_CHARLIE.with(|caller| assert_eq!(*caller.borrow(), Some(dest))); } #[test] fn address_returns_proper_values() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let bob_ch = loader.insert(|ctx| { + let bob_ch = MockLoader::insert(|ctx| { // Verify that address matches BOB. assert_eq!(*ctx.ext.address(), BOB); @@ -1178,14 +1206,14 @@ mod tests { ); exec_success() }); - let charlie_ch = loader.insert(|ctx| { + let charlie_ch = MockLoader::insert(|ctx| { assert_eq!(*ctx.ext.address(), CHARLIE); exec_success() }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(ALICE, &schedule); place_contract(&BOB, bob_ch); place_contract(&CHARLIE, charlie_ch); @@ -1202,21 +1230,19 @@ mod tests { #[test] fn refuse_instantiate_with_value_below_existential_deposit() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert(|_| exec_success()); + let dummy_ch = MockLoader::insert(|_| exec_success()); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(ALICE, &schedule); assert_matches!( ctx.instantiate( 0, // <- zero endowment &mut GasMeter::::new(GAS_LIMIT), - &dummy_ch, + MockExecutable::from_storage(dummy_ch, &schedule).unwrap(), vec![], + &[], ), Err(_) ); @@ -1225,31 +1251,29 @@ mod tests { #[test] fn instantiation_work_with_success_output() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert( + let dummy_ch = MockLoader::insert( |_| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] }) ); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(ALICE, &schedule); set_balance(&ALICE, 1000); let instantiated_contract_address = assert_matches!( ctx.instantiate( 100, &mut GasMeter::::new(GAS_LIMIT), - &dummy_ch, + MockExecutable::from_storage(dummy_ch, &schedule).unwrap(), vec![], + &[], ), Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address ); // Check that the newly created account has the expected code hash and // there are instantiation event. 
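Because the mock closures now live in a global registry they must be `'static`, which is why the tests replace stack-local `RefCell` witnesses (such as `witnessed_caller_bob`) with `thread_local!` statics that are written from inside the closure and asserted on afterwards. A small sketch of that witness pattern, with an arbitrary `u32` standing in for the account type:

```rust
use std::cell::RefCell;

thread_local! {
    // Written from inside a 'static closure, read back after the call returns.
    static WITNESSED_CALLER: RefCell<Option<u32>> = RefCell::new(None);
}

// A 'static closure cannot borrow a stack-local RefCell, so it records into the
// thread-local instead.
fn make_handler() -> impl Fn(u32) + 'static {
    |caller| WITNESSED_CALLER.with(|w| *w.borrow_mut() = Some(caller))
}

fn main() {
    let handler = make_handler();
    handler(42);
    WITNESSED_CALLER.with(|w| assert_eq!(*w.borrow(), Some(42)));
}
```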
- assert_eq!(storage::code_hash::(&instantiated_contract_address).unwrap(), dummy_ch); + assert_eq!(Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch); assert_eq!(&events(), &[ RawEvent::Instantiated(ALICE, instantiated_contract_address) ]); @@ -1258,51 +1282,47 @@ mod tests { #[test] fn instantiation_fails_with_failing_output() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert( + let dummy_ch = MockLoader::insert( |_| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] }) ); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(ALICE, &schedule); set_balance(&ALICE, 1000); let instantiated_contract_address = assert_matches!( ctx.instantiate( 100, &mut GasMeter::::new(GAS_LIMIT), - &dummy_ch, + MockExecutable::from_storage(dummy_ch, &schedule).unwrap(), vec![], + &[], ), Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address ); // Check that the account has not been created. - assert!(storage::code_hash::(&instantiated_contract_address).is_err()); + assert!(Storage::::code_hash(&instantiated_contract_address).is_err()); assert!(events().is_empty()); }); } #[test] fn instantiation_from_contract() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert(|_| exec_success()); - let instantiated_contract_address = Rc::new(RefCell::new(None::)); - let instantiator_ch = loader.insert({ + let dummy_ch = MockLoader::insert(|_| exec_success()); + let instantiated_contract_address = Rc::new(RefCell::new(None::>)); + let instantiator_ch = MockLoader::insert({ let dummy_ch = dummy_ch.clone(); let instantiated_contract_address = Rc::clone(&instantiated_contract_address); move |ctx| { // Instantiate a contract and save it's address in `instantiated_contract_address`. let (address, output) = ctx.ext.instantiate( - &dummy_ch, - Config::::subsistence_threshold_uncached(), + dummy_ch, + Contracts::::subsistence_threshold() * 3, ctx.gas_meter, - vec![] + vec![], + &[48, 49, 50], ).unwrap(); *instantiated_contract_address.borrow_mut() = address.into(); @@ -1311,10 +1331,9 @@ mod tests { }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - set_balance(&ALICE, 1000); - set_balance(&BOB, 100); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(ALICE, &schedule); + set_balance(&ALICE, Contracts::::subsistence_threshold() * 100); place_contract(&BOB, instantiator_ch); assert_matches!( @@ -1326,7 +1345,7 @@ mod tests { // Check that the newly created account has the expected code hash and // there are instantiation event. 
- assert_eq!(storage::code_hash::(&instantiated_contract_address).unwrap(), dummy_ch); + assert_eq!(Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch); assert_eq!(&events(), &[ RawEvent::Instantiated(BOB, instantiated_contract_address) ]); @@ -1335,22 +1354,20 @@ mod tests { #[test] fn instantiation_traps() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert( + let dummy_ch = MockLoader::insert( |_| Err("It's a trap!".into()) ); - let instantiator_ch = loader.insert({ + let instantiator_ch = MockLoader::insert({ let dummy_ch = dummy_ch.clone(); move |ctx| { // Instantiate a contract and save it's address in `instantiated_contract_address`. assert_matches!( ctx.ext.instantiate( - &dummy_ch, + dummy_ch, 15u64, ctx.gas_meter, - vec![] + vec![], + &[], ), Err(ExecError { error: DispatchError::Other("It's a trap!"), @@ -1363,8 +1380,8 @@ mod tests { }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(ALICE, &schedule); set_balance(&ALICE, 1000); set_balance(&BOB, 100); place_contract(&BOB, instantiator_ch); @@ -1382,11 +1399,7 @@ mod tests { #[test] fn termination_from_instantiate_fails() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - - let terminate_ch = loader.insert(|ctx| { + let terminate_ch = MockLoader::insert(|ctx| { ctx.ext.terminate(&ALICE).unwrap(); exec_success() }); @@ -1395,18 +1408,19 @@ mod tests { .existential_deposit(15) .build() .execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(ALICE, &schedule); set_balance(&ALICE, 1000); assert_eq!( ctx.instantiate( 100, &mut GasMeter::::new(GAS_LIMIT), - &terminate_ch, + MockExecutable::from_storage(terminate_ch, &schedule).unwrap(), vec![], + &[], ), - Err(Error::::NewContractNotFunded.into()) + Err(Error::::NotCallable.into()) ); assert_eq!( @@ -1418,25 +1432,27 @@ mod tests { #[test] fn rent_allowance() { - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let rent_allowance_ch = loader.insert(|ctx| { + let rent_allowance_ch = MockLoader::insert(|ctx| { + let subsistence = Contracts::::subsistence_threshold(); + let allowance = subsistence * 3; assert_eq!(ctx.ext.rent_allowance(), >::max_value()); - ctx.ext.set_rent_allowance(10); - assert_eq!(ctx.ext.rent_allowance(), 10); + ctx.ext.set_rent_allowance(allowance); + assert_eq!(ctx.ext.rent_allowance(), allowance); exec_success() }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - set_balance(&ALICE, 100); + let subsistence = Contracts::::subsistence_threshold(); + let schedule = Contracts::current_schedule(); + let mut ctx = MockContext::top_level(ALICE, &schedule); + set_balance(&ALICE, subsistence * 10); let result = ctx.instantiate( - cfg.subsistence_threshold(), + subsistence * 5, &mut GasMeter::::new(GAS_LIMIT), - &rent_allowance_ch, + MockExecutable::from_storage(rent_allowance_ch, &schedule).unwrap(), vec![], + &[], ); assert_matches!(result, Ok(_)); }); diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 0828a220c0406..4bdfcdd577119 100644 --- a/frame/contracts/src/gas.rs +++ 
b/frame/contracts/src/gas.rs @@ -1,24 +1,26 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use crate::Trait; +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::Config; use sp_std::marker::PhantomData; use sp_runtime::traits::Zero; -use frame_support::dispatch::{ - DispatchResultWithPostInfo, PostDispatchInfo, DispatchErrorWithPostInfo, +use frame_support::{ + dispatch::{DispatchResultWithPostInfo, PostDispatchInfo, DispatchErrorWithPostInfo}, + weights::Weight, }; use pallet_contracts_primitives::ExecError; @@ -26,12 +28,12 @@ use pallet_contracts_primitives::ExecError; use std::{any::Any, fmt::Debug}; // Gas is essentially the same as weight. It is a 1 to 1 correspondence. -pub type Gas = frame_support::weights::Weight; +pub type Gas = Weight; #[must_use] #[derive(Debug, PartialEq, Eq)] pub enum GasMeterResult { - Proceed, + Proceed(ChargedAmount), OutOfGas, } @@ -39,11 +41,20 @@ impl GasMeterResult { pub fn is_out_of_gas(&self) -> bool { match *self { GasMeterResult::OutOfGas => true, - GasMeterResult::Proceed => false, + GasMeterResult::Proceed(_) => false, } } } +#[derive(Debug, PartialEq, Eq)] +pub struct ChargedAmount(Gas); + +impl ChargedAmount { + pub fn amount(&self) -> Gas { + self.0 + } +} + #[cfg(not(test))] pub trait TestAuxiliaries {} #[cfg(not(test))] @@ -60,7 +71,7 @@ impl TestAuxiliaries for T {} /// Implementing type is expected to be super lightweight hence `Copy` (`Clone` is added /// for consistency). If inlined there should be no observable difference compared /// to a hand-written code. -pub trait Token: Copy + Clone + TestAuxiliaries { +pub trait Token: Copy + Clone + TestAuxiliaries { /// Metadata type, which the token can require for calculating the amount /// of gas to charge. Can be a some configuration type or /// just the `()`. @@ -84,7 +95,7 @@ pub struct ErasedToken { pub token: Box, } -pub struct GasMeter { +pub struct GasMeter { gas_limit: Gas, /// Amount of gas left from initial gas limit. Can reach zero. 
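`GasMeterResult::Proceed` now carries a `ChargedAmount` receipt and, as the next hunk shows, `refund` consumes that receipt and clamps the result to the original limit so a refund can never create gas out of thin air. Below is a rough, self-contained sketch of the charge-then-refund pattern, with `u64` standing in for `Weight`/`Gas` and error handling reduced to a string; it mirrors the shape of the change, not the pallet's actual implementation:

```rust
type Gas = u64;

#[derive(Debug, PartialEq)]
struct ChargedAmount(Gas);

struct GasMeter {
    gas_limit: Gas,
    gas_left: Gas,
}

impl GasMeter {
    fn new(gas_limit: Gas) -> Self {
        Self { gas_limit, gas_left: gas_limit }
    }

    /// Charging hands back a receipt for the amount that was actually deducted.
    fn charge(&mut self, amount: Gas) -> Result<ChargedAmount, &'static str> {
        self.gas_left = self.gas_left.checked_sub(amount).ok_or("out of gas")?;
        Ok(ChargedAmount(amount))
    }

    /// Refunds are clamped to the original limit, so a refund can never mint gas.
    fn refund(&mut self, amount: ChargedAmount) {
        self.gas_left = self.gas_left.saturating_add(amount.0).min(self.gas_limit);
    }

    fn gas_spent(&self) -> Gas {
        self.gas_limit - self.gas_left
    }
}

fn main() {
    let mut meter = GasMeter::new(1_000);
    // Charge a worst-case estimate up front, then hand the receipt back once the
    // cheaper path was actually taken.
    let receipt = meter.charge(300).expect("within the limit");
    meter.refund(receipt);
    assert_eq!(meter.gas_spent(), 0);
}
```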
gas_left: Gas, @@ -92,7 +103,7 @@ pub struct GasMeter { #[cfg(test)] tokens: Vec, } -impl GasMeter { +impl GasMeter { pub fn new(gas_limit: Gas) -> Self { GasMeter { gas_limit, @@ -138,17 +149,18 @@ impl GasMeter { self.gas_left = new_value.unwrap_or_else(Zero::zero); match new_value { - Some(_) => GasMeterResult::Proceed, + Some(_) => GasMeterResult::Proceed(ChargedAmount(amount)), None => GasMeterResult::OutOfGas, } } - // Account for not fully used gas. - // - // This can be used after dispatching a runtime call to refund gas that was not - // used by the dispatchable. - pub fn refund(&mut self, gas: Gas) { - self.gas_left = self.gas_left.saturating_add(gas).max(self.gas_limit); + /// Refund previously charged gas back to the gas meter. + /// + /// This can be used if a gas worst case estimation must be charged before + /// performing a certain action. This way the difference can be refundend when + /// the worst case did not happen. + pub fn refund(&mut self, amount: ChargedAmount) { + self.gas_left = self.gas_left.saturating_add(amount.0).min(self.gas_limit) } /// Allocate some amount of gas and perform some work with @@ -190,12 +202,15 @@ impl GasMeter { } /// Turn this GasMeter into a DispatchResult that contains the actually used gas. - pub fn into_dispatch_result(self, result: Result) -> DispatchResultWithPostInfo + pub fn into_dispatch_result( + self, result: Result, + base_weight: Weight, + ) -> DispatchResultWithPostInfo where E: Into, { let post_info = PostDispatchInfo { - actual_weight: Some(self.gas_spent()), + actual_weight: Some(self.gas_spent().saturating_add(base_weight)), pays_fee: Default::default(), }; diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 9f1656f35f6ee..b20db8dd8cd84 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -1,24 +1,25 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! # Contract Module //! //! The Contract module provides functionality for the runtime to deploy and execute WebAssembly smart-contracts. //! -//! - [`contract::Trait`](./trait.Trait.html) +//! - [`contract::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -58,10 +59,11 @@ //! //! 
### Dispatchable functions //! -//! * `put_code` - Stores the given binary Wasm code into the chain's storage and returns its `code_hash`. -//! * `instantiate` - Deploys a new contract from the given `code_hash`, optionally transferring some balance. -//! This instantiates a new smart contract account and calls its contract deploy handler to -//! initialize the contract. +//! * `instantiate_with_code` - Deploys a new contract from the supplied wasm binary, optionally transferring +//! some balance. This instantiates a new smart contract account and calls its contract deploy +//! handler to initialize the contract. +//! * `instantiate` - The same as `instantiate_with_code` but instead of uploading new code an +//! existing `code_hash` is supplied. //! * `call` - Makes a call to an account, optionally transferring some balance. //! //! ## Usage @@ -88,19 +90,24 @@ mod wasm; mod rent; mod benchmarking; mod schedule; -mod weight_info; + +pub mod chain_extension; +pub mod weights; #[cfg(test)] mod tests; -use crate::exec::ExecutionContext; -use crate::wasm::{WasmLoader, WasmVm}; - -pub use crate::gas::{Gas, GasMeter}; -pub use crate::wasm::ReturnCode as RuntimeReturnCode; -pub use crate::weight_info::WeightInfo; -pub use crate::schedule::{Schedule, HostFnWeights, InstructionWeights}; - +pub use crate::{ + gas::{Gas, GasMeter}, + wasm::{ReturnCode as RuntimeReturnCode, PrefabWasmModule}, + weights::WeightInfo, + schedule::{Schedule, HostFnWeights, InstructionWeights, Limits}, +}; +use crate::{ + exec::{ExecutionContext, Executable}, + rent::Rent, + storage::Storage, +}; use sp_core::crypto::UncheckedFrom; use sp_std::{prelude::*, marker::PhantomData, fmt::Debug}; use codec::{Codec, Encode, Decode}; @@ -108,37 +115,33 @@ use sp_runtime::{ traits::{ Hash, StaticLookup, Zero, MaybeSerializeDeserialize, Member, Convert, Saturating, }, - RuntimeDebug, + RuntimeDebug, Perbill, }; use frame_support::{ decl_module, decl_event, decl_storage, decl_error, ensure, - parameter_types, storage::child::ChildInfo, + storage::child::ChildInfo, dispatch::{DispatchResult, DispatchResultWithPostInfo}, traits::{OnUnbalanced, Currency, Get, Time, Randomness}, + weights::Pays, }; -use frame_system::{ensure_signed, ensure_root}; +use frame_system::{ensure_signed, ensure_root, Module as System}; use pallet_contracts_primitives::{ RentProjectionResult, GetStorageResult, ContractAccessError, ContractExecResult, ExecResult, }; use frame_support::weights::Weight; -pub type CodeHash = ::Hash; +pub type CodeHash = ::Hash; pub type TrieId = Vec; -/// A function that generates an `AccountId` for a contract upon instantiation. -pub trait ContractAddressFor { - fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &AccountId) -> AccountId; -} - /// Information for managing an account and its sub trie abstraction. /// This is the required info to cache for an account #[derive(Encode, Decode, RuntimeDebug)] -pub enum ContractInfo { +pub enum ContractInfo { Alive(AliveContractInfo), Tombstone(TombstoneContractInfo), } -impl ContractInfo { +impl ContractInfo { /// If contract is alive then return some alive info pub fn get_alive(self) -> Option> { if let ContractInfo::Alive(alive) = self { @@ -191,7 +194,7 @@ impl ContractInfo { } pub type AliveContractInfo = - RawAliveContractInfo, BalanceOf, ::BlockNumber>; + RawAliveContractInfo, BalanceOf, ::BlockNumber>; /// Information for managing an account and its sub trie abstraction. /// This is the required info to cache for an account. 
@@ -203,15 +206,16 @@ pub struct RawAliveContractInfo { /// /// It is a sum of each key-value pair stored by this contract. pub storage_size: u32, - /// The number of key-value pairs that have values of zero length. - /// The condition `empty_pair_count ≤ total_pair_count` always holds. - pub empty_pair_count: u32, /// The total number of key-value pairs in storage of this contract. - pub total_pair_count: u32, + pub pair_count: u32, /// The code associated with a given account. pub code_hash: CodeHash, /// Pay rent at most up to this value. pub rent_allowance: Balance, + /// The amount of rent that was payed by the contract over its whole lifetime. + /// + /// A restored contract starts with a value of zero just like a new contract. + pub rent_payed: Balance, /// Last block rent has been payed. pub deduct_block: BlockNumber, /// Last block child storage has been written. @@ -231,7 +235,7 @@ pub(crate) fn child_trie_info(trie_id: &[u8]) -> ChildInfo { } pub type TombstoneContractInfo = - RawTombstoneContractInfo<::Hash, ::Hashing>; + RawTombstoneContractInfo<::Hash, ::Hashing>; #[derive(Encode, Decode, PartialEq, Eq, RuntimeDebug)] pub struct RawTombstoneContractInfo(H, PhantomData); @@ -251,73 +255,18 @@ where } } -impl From> for ContractInfo { +impl From> for ContractInfo { fn from(alive_info: AliveContractInfo) -> Self { Self::Alive(alive_info) } } -/// Get a trie id (trie id must be unique and collision resistant depending upon its context). -/// Note that it is different than encode because trie id should be collision resistant -/// (being a proper unique identifier). -pub trait TrieIdGenerator { - /// Get a trie id for an account, using reference to parent account trie id to ensure - /// uniqueness of trie id. - /// - /// The implementation must ensure every new trie id is unique: two consecutive calls with the - /// same parameter needs to return different trie id values. - fn trie_id(account_id: &AccountId) -> TrieId; -} - -/// Get trie id from `account_id`. -pub struct TrieIdFromParentCounter(PhantomData); - -/// This generator uses inner counter for account id and applies the hash over `AccountId + -/// accountid_counter`. -impl TrieIdGenerator for TrieIdFromParentCounter -where - T::AccountId: AsRef<[u8]> -{ - fn trie_id(account_id: &T::AccountId) -> TrieId { - // Note that skipping a value due to error is not an issue here. - // We only need uniqueness, not sequence. - let new_seed = AccountCounter::mutate(|v| { - *v = v.wrapping_add(1); - *v - }); - - let mut buf = Vec::new(); - buf.extend_from_slice(account_id.as_ref()); - buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - T::Hashing::hash(&buf[..]).as_ref().into() - } -} - pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; pub type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; - -parameter_types! { - /// A reasonable default value for [`Trait::SignedClaimedHandicap`]. - pub const DefaultSignedClaimHandicap: u32 = 2; - /// A reasonable default value for [`Trait::TombstoneDeposit`]. - pub const DefaultTombstoneDeposit: u32 = 16; - /// A reasonable default value for [`Trait::StorageSizeOffset`]. - pub const DefaultStorageSizeOffset: u32 = 8; - /// A reasonable default value for [`Trait::RentByteFee`]. - pub const DefaultRentByteFee: u32 = 4; - /// A reasonable default value for [`Trait::RentDepositOffset`]. - pub const DefaultRentDepositOffset: u32 = 1000; - /// A reasonable default value for [`Trait::SurchargeReward`]. 
- pub const DefaultSurchargeReward: u32 = 150; - /// A reasonable default value for [`Trait::MaxDepth`]. - pub const DefaultMaxDepth: u32 = 32; - /// A reasonable default value for [`Trait::MaxValueSize`]. - pub const DefaultMaxValueSize: u32 = 16_384; -} + <::Currency as Currency<::AccountId>>::NegativeImbalance; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { type Time: Time; type Randomness: Randomness; @@ -325,13 +274,7 @@ pub trait Trait: frame_system::Trait { type Currency: Currency; /// The overarching event type. - type Event: From> + Into<::Event>; - - /// A function type to get the contract address given the instantiator. - type DetermineContractAddress: ContractAddressFor, Self::AccountId>; - - /// trie id generator - type TrieIdGenerator: TrieIdGenerator; + type Event: From> + Into<::Event>; /// Handler for rent payments. type RentPayment: OnUnbalanced>; @@ -345,24 +288,35 @@ pub trait Trait: frame_system::Trait { /// The minimum amount required to generate a tombstone. type TombstoneDeposit: Get>; - /// A size offset for an contract. A just created account with untouched storage will have that - /// much of storage from the perspective of the state rent. + /// The balance every contract needs to deposit to stay alive indefinitely. + /// + /// This is different from the [`Self::TombstoneDeposit`] because this only needs to be + /// deposited while the contract is alive. Costs for additional storage are added to + /// this base cost. /// /// This is a simple way to ensure that contracts with empty storage eventually get deleted by /// making them pay rent. This creates an incentive to remove them early in order to save rent. - type StorageSizeOffset: Get; - - /// Price of a byte of storage per one block interval. Should be greater than 0. - type RentByteFee: Get>; + type DepositPerContract: Get>; - /// The amount of funds a contract should deposit in order to offset - /// the cost of one byte. + /// The balance a contract needs to deposit per storage byte to stay alive indefinitely. /// /// Let's suppose the deposit is 1,000 BU (balance units)/byte and the rent is 1 BU/byte/day, /// then a contract with 1,000,000 BU that uses 1,000 bytes of storage would pay no rent. /// But if the balance reduced to 500,000 BU and the storage stayed the same at 1,000, /// then it would pay 500 BU/day. - type RentDepositOffset: Get>; + type DepositPerStorageByte: Get>; + + /// The balance a contract needs to deposit per storage item to stay alive indefinitely. + /// + /// It works the same as [`Self::DepositPerStorageByte`] but for storage items. + type DepositPerStorageItem: Get>; + + /// The fraction of the deposit that should be used as rent per block. + /// + /// When a contract hasn't enough balance deposited to stay alive indefinitely it needs + /// to pay per block for the storage it consumes that is not covered by the deposit. + /// This determines how high this rent payment is per block as a fraction of the deposit. + type RentFraction: Get; /// Reward that is received by the party whose touch has led /// to removal of a contract. @@ -381,34 +335,24 @@ pub trait Trait: frame_system::Trait { /// Describes the weights of the dispatchables of this module and is also used to /// construct a default cost schedule. type WeightInfo: WeightInfo; -} -/// Simple contract address determiner. -/// -/// Address calculated from the code (of the constructor), input data to the constructor, -/// and the account id that requested the account creation. 
-/// -/// Formula: `blake2_256(blake2_256(code) + blake2_256(data) + origin)` -pub struct SimpleAddressDeterminer(PhantomData); -impl ContractAddressFor, T::AccountId> for SimpleAddressDeterminer -where - T::AccountId: UncheckedFrom + AsRef<[u8]> -{ - fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &T::AccountId) -> T::AccountId { - let data_hash = T::Hashing::hash(data); + /// Type that allows the runtime authors to add new host functions for a contract to call. + type ChainExtension: chain_extension::ChainExtension; - let mut buf = Vec::new(); - buf.extend_from_slice(code_hash.as_ref()); - buf.extend_from_slice(data_hash.as_ref()); - buf.extend_from_slice(origin.as_ref()); + /// The maximum number of tries that can be queued for deletion. + type DeletionQueueDepth: Get; - UncheckedFrom::unchecked_from(T::Hashing::hash(&buf[..])) - } + /// The maximum amount of weight that can be consumed per block for lazy trie removal. + type DeletionWeightLimit: Get; } decl_error! { /// Error for the contracts module. - pub enum Error for Module { + pub enum Error for Module + where + T::AccountId: UncheckedFrom, + T::AccountId: AsRef<[u8]>, + { /// A new schedule must have a greater version than the current one. InvalidScheduleVersion, /// An origin must be signed or inherent and auxiliary sender only provided on inherent. @@ -443,7 +387,8 @@ decl_error! { /// The contract that was called is either no contract at all (a plain account) /// or is a tombstone. NotCallable, - /// The code supplied to `put_code` exceeds the limit specified in the current schedule. + /// The code supplied to `instantiate_with_code` exceeds the limit specified in the + /// current schedule. CodeTooLarge, /// No code could be found at the supplied code hash. CodeNotFound, @@ -455,12 +400,51 @@ decl_error! { ContractTrapped, /// The size defined in `T::MaxValueSize` was exceeded. ValueTooLarge, + /// The action performed is not allowed while the contract performing it is already + /// on the call stack. Those actions are contract self destruction and restoration + /// of a tombstone. + ReentranceDenied, + /// `seal_input` was called twice from the same contract execution context. + InputAlreadyRead, + /// The subject passed to `seal_random` exceeds the limit. + RandomSubjectTooLong, + /// The amount of topics passed to `seal_deposit_events` exceeds the limit. + TooManyTopics, + /// The topics passed to `seal_deposit_events` contains at least one duplicate. + DuplicateTopics, + /// The chain does not provide a chain extension. Calling the chain extension results + /// in this error. Note that this usually shouldn't happen as deploying such contracts + /// is rejected. + NoChainExtension, + /// Removal of a contract failed because the deletion queue is full. + /// + /// This can happen when either calling [`Module::claim_surcharge`] or `seal_terminate`. + /// The queue is filled by deleting contracts and emptied by a fixed amount each block. + /// Trying again during another block is the only way to resolve this issue. + DeletionQueueFull, + /// A contract could not be evicted because it has enough balance to pay rent. + /// + /// This can be returned from [`Module::claim_surcharge`] because the target + /// contract has enough balance to pay for its rent. + ContractNotEvictable, + /// A storage modification exhausted the 32bit type that holds the storage size. + /// + /// This can either happen when the accumulated storage in bytes is too large or + /// when number of storage items is too large. 
+ StorageExhausted, + /// A contract with the same AccountId already exists. + DuplicateContract, } } decl_module! { /// Contracts module. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call + where + origin: T::Origin, + T::AccountId: UncheckedFrom, + T::AccountId: AsRef<[u8]>, + { type Error = Error; /// Number of block delay an extrinsic claim surcharge has. @@ -472,25 +456,35 @@ decl_module! { /// The minimum amount required to generate a tombstone. const TombstoneDeposit: BalanceOf = T::TombstoneDeposit::get(); - /// A size offset for an contract. A just created account with untouched storage will have that - /// much of storage from the perspective of the state rent. + /// The balance every contract needs to deposit to stay alive indefinitely. /// - /// This is a simple way to ensure that contracts with empty storage eventually get deleted - /// by making them pay rent. This creates an incentive to remove them early in order to save - /// rent. - const StorageSizeOffset: u32 = T::StorageSizeOffset::get(); - - /// Price of a byte of storage per one block interval. Should be greater than 0. - const RentByteFee: BalanceOf = T::RentByteFee::get(); + /// This is different from the [`Self::TombstoneDeposit`] because this only needs to be + /// deposited while the contract is alive. Costs for additional storage are added to + /// this base cost. + /// + /// This is a simple way to ensure that contracts with empty storage eventually get deleted by + /// making them pay rent. This creates an incentive to remove them early in order to save rent. + const DepositPerContract: BalanceOf = T::DepositPerContract::get(); - /// The amount of funds a contract should deposit in order to offset - /// the cost of one byte. + /// The balance a contract needs to deposit per storage byte to stay alive indefinitely. /// /// Let's suppose the deposit is 1,000 BU (balance units)/byte and the rent is 1 BU/byte/day, /// then a contract with 1,000,000 BU that uses 1,000 bytes of storage would pay no rent. /// But if the balance reduced to 500,000 BU and the storage stayed the same at 1,000, /// then it would pay 500 BU/day. - const RentDepositOffset: BalanceOf = T::RentDepositOffset::get(); + const DepositPerStorageByte: BalanceOf = T::DepositPerStorageByte::get(); + + /// The balance a contract needs to deposit per storage item to stay alive indefinitely. + /// + /// It works the same as [`Self::DepositPerStorageByte`] but for storage items. + const DepositPerStorageItem: BalanceOf = T::DepositPerStorageItem::get(); + + /// The fraction of the deposit that should be used as rent per block. + /// + /// When a contract hasn't enough balance deposited to stay alive indefinitely it needs + /// to pay per block for the storage it consumes that is not covered by the deposit. + /// This determines how high this rent payment is per block as a fraction of the deposit. + const RentFraction: Perbill = T::RentFraction::get(); /// Reward that is received by the party whose touch has led /// to removal of a contract. @@ -503,8 +497,24 @@ decl_module! { /// The maximum size of a storage value in bytes. A reasonable default is 16 KiB. const MaxValueSize: u32 = T::MaxValueSize::get(); + /// The maximum number of tries that can be queued for deletion. + const DeletionQueueDepth: u32 = T::DeletionQueueDepth::get(); + + /// The maximum amount of weight that can be consumed per block for lazy trie removal. 
+ const DeletionWeightLimit: Weight = T::DeletionWeightLimit::get(); + fn deposit_event() = default; + fn on_initialize() -> Weight { + // We do not want to go above the block limit and rather avoid lazy deletion + // in that case. This should only happen on runtime upgrades. + let weight_limit = T::BlockWeights::get().max_block + .saturating_sub(System::::block_weight().total()) + .min(T::DeletionWeightLimit::get()); + Storage::::process_deletion_queue_batch(weight_limit) + .saturating_add(T::WeightInfo::on_initialize()) + } + /// Updates the schedule for metering contracts. /// /// The schedule must have a greater version than the stored schedule. @@ -521,23 +531,6 @@ decl_module! { Ok(()) } - /// Stores the given binary Wasm code into the chain's storage and returns its `codehash`. - /// You can instantiate contracts only with stored code. - #[weight = T::WeightInfo::put_code(code.len() as u32 / 1024)] - pub fn put_code( - origin, - code: Vec - ) -> DispatchResult { - ensure_signed(origin)?; - let schedule = >::current_schedule(); - ensure!(code.len() as u32 <= schedule.max_code_size, Error::::CodeTooLarge); - let result = wasm::save_code::(code, &schedule); - if let Ok(code_hash) = result { - Self::deposit_event(RawEvent::CodeStored(code_hash)); - } - result.map(|_| ()).map_err(Into::into) - } - /// Makes a call to an account, optionally transferring some balance. /// /// * If the account is a smart-contract account, the associated code will be @@ -556,48 +549,111 @@ decl_module! { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; let mut gas_meter = GasMeter::new(gas_limit); - let result = Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { ctx.call(dest, value, gas_meter, data) }); - gas_meter.into_dispatch_result(result) + gas_meter.into_dispatch_result(result, T::WeightInfo::call()) } - /// Instantiates a new contract from the `codehash` generated by `put_code`, optionally transferring some balance. + /// Instantiates a new contract from the supplied `code` optionally transferring + /// some balance. + /// + /// This is the only function that can deploy new code to the chain. + /// + /// # Parameters + /// + /// * `endowment`: The balance to transfer from the `origin` to the newly created contract. + /// * `gas_limit`: The gas limit enforced when executing the constructor. + /// * `code`: The contract code to deploy in raw bytes. + /// * `data`: The input data to pass to the contract constructor. + /// * `salt`: Used for the address derivation. See [`Self::contract_address`]. /// /// Instantiation is executed as follows: /// - /// - The destination address is computed based on the sender and hash of the code. + /// - The supplied `code` is instrumented, deployed, and a `code_hash` is created for that code. + /// - If the `code_hash` already exists on the chain the underlying `code` will be shared. + /// - The destination address is computed based on the sender, code_hash and the salt. /// - The smart-contract account is created at the computed address. - /// - The `ctor_code` is executed in the context of the newly-created account. Buffer returned - /// after the execution is saved as the `code` of the account. That code will be invoked - /// upon any call received by this account. - /// - The contract is initialized. - #[weight = T::WeightInfo::instantiate(data.len() as u32 / 1024).saturating_add(*gas_limit)] + /// - The `endowment` is transferred to the new account. 
+ /// - The `deploy` function is executed in the context of the newly-created account. + #[weight = + T::WeightInfo::instantiate_with_code( + code.len() as u32 / 1024, + salt.len() as u32 / 1024, + ) + .saturating_add(*gas_limit) + ] + pub fn instantiate_with_code( + origin, + #[compact] endowment: BalanceOf, + #[compact] gas_limit: Gas, + code: Vec, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let schedule = >::current_schedule(); + let code_len = code.len() as u32; + ensure!(code_len <= schedule.limits.code_size, Error::::CodeTooLarge); + let mut gas_meter = GasMeter::new(gas_limit); + let result = Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { + let executable = PrefabWasmModule::from_code(code, &schedule)?; + let result = ctx.instantiate(endowment, gas_meter, executable, data, &salt) + .map(|(_address, output)| output)?; + Ok(result) + }); + gas_meter.into_dispatch_result( + result, + T::WeightInfo::instantiate_with_code(code_len / 1024, salt.len() as u32 / 1024) + ) + } + + /// Instantiates a contract from a previously deployed wasm binary. + /// + /// This function is identical to [`Self::instantiate_with_code`] but without the + /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary + /// must be supplied. + #[weight = + T::WeightInfo::instantiate(salt.len() as u32 / 1024) + .saturating_add(*gas_limit) + ] pub fn instantiate( origin, #[compact] endowment: BalanceOf, #[compact] gas_limit: Gas, code_hash: CodeHash, - data: Vec + data: Vec, + salt: Vec, ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; let mut gas_meter = GasMeter::new(gas_limit); - let result = Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { - ctx.instantiate(endowment, gas_meter, &code_hash, data) - .map(|(_address, output)| output) + let executable = PrefabWasmModule::from_storage(code_hash, &ctx.schedule)?; + let result = ctx.instantiate(endowment, gas_meter, executable, data, &salt) + .map(|(_address, output)| output)?; + Ok(result) }); - gas_meter.into_dispatch_result(result) + gas_meter.into_dispatch_result( + result, + T::WeightInfo::instantiate(salt.len() as u32 / 1024) + ) } - /// Allows block producers to claim a small reward for evicting a contract. If a block producer - /// fails to do so, a regular users will be allowed to claim the reward. + /// Allows block producers to claim a small reward for evicting a contract. If a block + /// producer fails to do so, a regular users will be allowed to claim the reward. /// - /// If contract is not evicted as a result of this call, no actions are taken and - /// the sender is not eligible for the reward. + /// In case of a successful eviction no fees are charged from the sender. However, the + /// reward is capped by the total amount of rent that was payed by the contract while + /// it was alive. + /// + /// If contract is not evicted as a result of this call, [`Error::ContractNotEvictable`] + /// is returned and the sender is not eligible for the reward. #[weight = T::WeightInfo::claim_surcharge()] - fn claim_surcharge(origin, dest: T::AccountId, aux_sender: Option) { + pub fn claim_surcharge( + origin, + dest: T::AccountId, + aux_sender: Option + ) -> DispatchResultWithPostInfo { let origin = origin.into(); let (signed, rewarded) = match (origin, aux_sender) { (Ok(frame_system::RawOrigin::Signed(account)), None) => { @@ -619,21 +675,33 @@ decl_module! 
{ }; // If poking the contract has lead to eviction of the contract, give out the rewards. - if rent::snitch_contract_should_be_evicted::(&dest, handicap) { - T::Currency::deposit_into_existing(&rewarded, T::SurchargeReward::get())?; + if let Some(rent_payed) = + Rent::>::try_eviction(&dest, handicap)? + { + T::Currency::deposit_into_existing( + &rewarded, + T::SurchargeReward::get().min(rent_payed), + ) + .map(|_| Pays::No.into()) + .map_err(Into::into) + } else { + Err(Error::::ContractNotEvictable.into()) } } } } /// Public APIs provided by the contracts module. -impl Module { +impl Module +where + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ /// Perform a call to a specified contract. /// /// This function is similar to `Self::call`, but doesn't perform any address lookups and better /// suitable for calling directly from Rust. /// - /// It returns the exection result and the amount of used weight. + /// It returns the execution result and the amount of used weight. pub fn bare_call( origin: T::AccountId, dest: T::AccountId, @@ -659,33 +727,70 @@ impl Module { .get_alive() .ok_or(ContractAccessError::IsTombstone)?; - let maybe_value = storage::read_contract_storage(&contract_info.trie_id, &key); + let maybe_value = Storage::::read(&contract_info.trie_id, &key); Ok(maybe_value) } pub fn rent_projection(address: T::AccountId) -> RentProjectionResult { - rent::compute_rent_projection::(&address) + Rent::>::compute_projection(&address) } - /// Put code for benchmarks which does not check or instrument the code. + /// Store code for benchmarks which does not check nor instrument the code. #[cfg(feature = "runtime-benchmarks")] - pub fn put_code_raw(code: Vec) -> DispatchResult { + pub fn store_code_raw(code: Vec) -> DispatchResult { let schedule = >::current_schedule(); - let result = wasm::save_code_raw::(code, &schedule); - result.map(|_| ()).map_err(Into::into) + PrefabWasmModule::store_code_unchecked(code, &schedule)?; + Ok(()) + } + + /// Determine the address of a contract, + /// + /// This is the address generation function used by contract instantiation. Its result + /// is only dependend on its inputs. It can therefore be used to reliably predict the + /// address of a contract. This is akin to the formular of eth's CREATE2 opcode. There + /// is no CREATE equivalent because CREATE2 is strictly more powerful. + /// + /// Formula: `hash(deploying_address ++ code_hash ++ salt)` + pub fn contract_address( + deploying_address: &T::AccountId, + code_hash: &CodeHash, + salt: &[u8], + ) -> T::AccountId + { + let buf: Vec<_> = deploying_address.as_ref().iter() + .chain(code_hash.as_ref()) + .chain(salt) + .cloned() + .collect(); + UncheckedFrom::unchecked_from(T::Hashing::hash(&buf)) + } + + /// Subsistence threshold is the extension of the minimum balance (aka existential deposit) + /// by the tombstone deposit, required for leaving a tombstone. + /// + /// Rent or any contract initiated balance transfer mechanism cannot make the balance lower + /// than the subsistence threshold in order to guarantee that a tombstone is created. + /// + /// The only way to completely kill a contract without a tombstone is calling `seal_terminate`. 
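The `contract_address` helper above makes address derivation deterministic over `(deploying_address, code_hash, salt)`, in the spirit of Ethereum's CREATE2. A standalone sketch of that derivation is given below; `DefaultHasher` is only a stand-in for the runtime's configured `T::Hashing` (e.g. BLAKE2-256), so the concrete values are illustrative only:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in hash; a runtime would use its configured hashing (e.g. BLAKE2-256).
fn hash_bytes(data: &[u8]) -> u64 {
    let mut hasher = DefaultHasher::new();
    data.hash(&mut hasher);
    hasher.finish()
}

// Sketch of the derivation: hash(deploying_address ++ code_hash ++ salt).
fn contract_address(deployer: &[u8], code_hash: &[u8], salt: &[u8]) -> u64 {
    let buf: Vec<u8> = deployer
        .iter()
        .chain(code_hash)
        .chain(salt)
        .cloned()
        .collect();
    hash_bytes(&buf)
}

fn main() {
    let a = contract_address(b"alice", b"code", b"salt-1");
    let b = contract_address(b"alice", b"code", b"salt-2");
    // Different salts give different, yet fully predictable, addresses.
    assert_ne!(a, b);
    assert_eq!(a, contract_address(b"alice", b"code", b"salt-1"));
}
```

Because the salt is part of the preimage, one deployer can instantiate many contracts from a single `code_hash` while still being able to predict every address up front.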
+ pub fn subsistence_threshold() -> BalanceOf { + T::Currency::minimum_balance().saturating_add(T::TombstoneDeposit::get()) } } -impl Module { +impl Module +where + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ fn execute_wasm( origin: T::AccountId, gas_meter: &mut GasMeter, - func: impl FnOnce(&mut ExecutionContext, WasmLoader>, &mut GasMeter) -> ExecResult, + func: impl FnOnce( + &mut ExecutionContext>, + &mut GasMeter, + ) -> ExecResult, ) -> ExecResult { - let cfg = Config::preload(); - let vm = WasmVm::new(&cfg.schedule); - let loader = WasmLoader::new(&cfg.schedule); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let schedule = >::current_schedule(); + let mut ctx = ExecutionContext::top_level(origin, &schedule); func(&mut ctx, gas_meter) } } @@ -694,101 +799,90 @@ decl_event! { pub enum Event where Balance = BalanceOf, - ::AccountId, - ::Hash + ::AccountId, + ::Hash { - /// Contract deployed by address at the specified address. \[owner, contract\] + /// Contract deployed by address at the specified address. \[deployer, contract\] Instantiated(AccountId, AccountId), - /// Contract has been evicted and is now in tombstone state. - /// \[contract, tombstone\] + /// Contract has been evicted and is now in tombstone state. \[contract\] + Evicted(AccountId), + + /// Contract has been terminated without leaving a tombstone. + /// \[contract, beneficiary\] /// /// # Params /// - /// - `contract`: `AccountId`: The account ID of the evicted contract. - /// - `tombstone`: `bool`: True if the evicted contract left behind a tombstone. - Evicted(AccountId, bool), + /// - `contract`: The contract that was terminated. + /// - `beneficiary`: The account that received the contracts remaining balance. + /// + /// # Note + /// + /// The only way for a contract to be removed without a tombstone and emitting + /// this event is by calling `seal_terminate`. + Terminated(AccountId, AccountId), - /// Restoration for a contract has been successful. - /// \[donor, dest, code_hash, rent_allowance\] + /// Restoration of a contract has been successful. + /// \[restorer, dest, code_hash, rent_allowance\] /// /// # Params /// - /// - `donor`: `AccountId`: Account ID of the restoring contract - /// - `dest`: `AccountId`: Account ID of the restored contract - /// - `code_hash`: `Hash`: Code hash of the restored contract - /// - `rent_allowance: `Balance`: Rent allowance of the restored contract + /// - `restorer`: Account ID of the restoring contract. + /// - `dest`: Account ID of the restored contract. + /// - `code_hash`: Code hash of the restored contract. + /// - `rent_allowance`: Rent allowance of the restored contract. Restored(AccountId, AccountId, Hash, Balance), - /// Code with the specified hash has been stored. - /// \[code_hash\] + /// Code with the specified hash has been stored. \[code_hash\] CodeStored(Hash), - /// Triggered when the current \[schedule\] is updated. + /// Triggered when the current schedule is updated. + /// \[version\] + /// + /// # Params + /// + /// - `version`: The version of the newly set schedule. ScheduleUpdated(u32), - /// An event deposited upon execution of a contract from the account. - /// \[account, data\] - ContractExecution(AccountId, Vec), + /// A custom event emitted by the contract. + /// \[contract, data\] + /// + /// # Params + /// + /// - `contract`: The contract that emitted the event. + /// - `data`: Data supplied by the contract. Metadata generated during contract + /// compilation is needed to decode it. 
+ ContractEmitted(AccountId, Vec), + + /// A code with the specified hash was removed. + /// \[code_hash\] + /// + /// This happens when the last contract that uses this code hash was removed or evicted. + CodeRemoved(Hash), } } decl_storage! { - trait Store for Module as Contracts { + trait Store for Module as Contracts + where + T::AccountId: UncheckedFrom + AsRef<[u8]> + { /// Current cost schedule for contracts. CurrentSchedule get(fn current_schedule) config(): Schedule = Default::default(); /// A mapping from an original code hash to the original code, untouched by instrumentation. pub PristineCode: map hasher(identity) CodeHash => Option>; /// A mapping between an original code hash and instrumented wasm code, ready for execution. - pub CodeStorage: map hasher(identity) CodeHash => Option; + pub CodeStorage: map hasher(identity) CodeHash => Option>; /// The subtrie counter. pub AccountCounter: u64 = 0; /// The code associated with a given account. /// /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. pub ContractInfoOf: map hasher(twox_64_concat) T::AccountId => Option>; - } -} - -/// In-memory cache of configuration values. -/// -/// We assume that these values can't be changed in the -/// course of transaction execution. -pub struct Config { - pub schedule: Schedule, - pub existential_deposit: BalanceOf, - pub tombstone_deposit: BalanceOf, - pub max_depth: u32, - pub max_value_size: u32, -} - -impl Config { - fn preload() -> Config { - Config { - schedule: >::current_schedule(), - existential_deposit: T::Currency::minimum_balance(), - tombstone_deposit: T::TombstoneDeposit::get(), - max_depth: T::MaxDepth::get(), - max_value_size: T::MaxValueSize::get(), - } - } - - /// Subsistence threshold is the extension of the minimum balance (aka existential deposit) by the - /// tombstone deposit, required for leaving a tombstone. - /// - /// Rent or any contract initiated balance transfer mechanism cannot make the balance lower - /// than the subsistence threshold in order to guarantee that a tombstone is created. - /// - /// The only way to completely kill a contract without a tombstone is calling `seal_terminate`. - pub fn subsistence_threshold(&self) -> BalanceOf { - self.existential_deposit.saturating_add(self.tombstone_deposit) - } - - /// The same as `subsistence_threshold` but without the need for a preloaded instance. - /// - /// This is for cases where this value is needed in rent calculation rather than - /// during contract execution. - pub fn subsistence_threshold_uncached() -> BalanceOf { - T::Currency::minimum_balance().saturating_add(T::TombstoneDeposit::get()) + /// Evicted contracts that await child trie deletion. + /// + /// Child trie deletion is a heavy operation depending on the amount of storage items + /// stored in said trie. Therefore this operation is performed lazily in `on_initialize`. + pub DeletionQueue: Vec; } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 3dc473363190b..38b1e8bd11753 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -1,42 +1,50 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! A module responsible for computing the right amount of weight and charging it. use crate::{ AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Module, RawEvent, - TombstoneContractInfo, Trait, CodeHash, Config + TombstoneContractInfo, Config, CodeHash, Error, + storage::Storage, wasm::PrefabWasmModule, exec::Executable, }; use sp_std::prelude::*; use sp_io::hashing::blake2_256; -use frame_support::storage::child; -use frame_support::traits::{Currency, ExistenceRequirement, Get, OnUnbalanced, WithdrawReasons}; -use frame_support::StorageMap; +use sp_core::crypto::UncheckedFrom; +use frame_support::{ + debug, StorageMap, + storage::child, + traits::{Currency, ExistenceRequirement, Get, OnUnbalanced, WithdrawReasons}, +}; use pallet_contracts_primitives::{ContractAccessError, RentProjection, RentProjectionResult}; -use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul, SaturatedConversion, Saturating, Zero}; +use sp_runtime::{ + DispatchError, + traits::{Bounded, CheckedDiv, CheckedMul, SaturatedConversion, Saturating, Zero}, +}; /// The amount to charge. /// /// This amount respects the contract's rent allowance and the subsistence deposit. /// Because of that, charging the amount cannot remove the contract. -struct OutstandingAmount { +struct OutstandingAmount { amount: BalanceOf, } -impl OutstandingAmount { +impl OutstandingAmount { /// Create the new outstanding amount. /// /// The amount should be always withdrawable and it should not kill the account. @@ -63,16 +71,12 @@ impl OutstandingAmount { } } -enum Verdict { +enum Verdict { /// The contract is exempted from paying rent. /// /// For example, it already paid its rent in the current block, or it has enough deposit for not /// paying rent at all. Exempt, - /// Funds dropped below the subsistence deposit. - /// - /// Remove the contract along with it's storage. - Kill, /// The contract cannot afford payment within its rent budget so it gets evicted. However, /// because its balance is greater than the subsistence threshold it leaves a tombstone. Evict { @@ -82,405 +86,443 @@ enum Verdict { Charge { amount: OutstandingAmount }, } -/// Returns a fee charged per block from the contract. -/// -/// This function accounts for the storage rent deposit. I.e. if the contract possesses enough funds -/// then the fee can drop to zero. 
-fn compute_fee_per_block( - free_balance: &BalanceOf, - contract: &AliveContractInfo, -) -> BalanceOf { - let free_storage = free_balance - .checked_div(&T::RentDepositOffset::get()) - .unwrap_or_else(Zero::zero); - - // For now, we treat every empty KV pair as if it was one byte long. - let empty_pairs_equivalent = contract.empty_pair_count; - - let effective_storage_size = >::from( - contract.storage_size + T::StorageSizeOffset::get() + empty_pairs_equivalent, - ) - .saturating_sub(free_storage); - - effective_storage_size - .checked_mul(&T::RentByteFee::get()) - .unwrap_or_else(|| >::max_value()) -} - -/// Returns amount of funds available to consume by rent mechanism. -/// -/// Rent mechanism cannot consume more than `rent_allowance` set by the contract and it cannot make -/// the balance lower than [`subsistence_threshold`]. -/// -/// In case the toal_balance is below the subsistence threshold, this function returns `None`. -fn rent_budget( - total_balance: &BalanceOf, - free_balance: &BalanceOf, - contract: &AliveContractInfo, -) -> Option> { - let subsistence_threshold = Config::::subsistence_threshold_uncached(); - // Reserved balance contributes towards the subsistence threshold to stay consistent - // with the existential deposit where the reserved balance is also counted. - if *total_balance < subsistence_threshold { - return None; - } +pub struct Rent(sp_std::marker::PhantomData<(T, E)>); - // However, reserved balance cannot be charged so we need to use the free balance - // to calculate the actual budget (which can be 0). - let rent_allowed_to_charge = free_balance.saturating_sub(subsistence_threshold); - Some(>::min( - contract.rent_allowance, - rent_allowed_to_charge, - )) -} - -/// Consider the case for rent payment of the given account and returns a `Verdict`. -/// -/// Use `handicap` in case you want to change the reference block number. (To get more details see -/// `snitch_contract_should_be_evicted` ). -fn consider_case( - account: &T::AccountId, - current_block_number: T::BlockNumber, - handicap: T::BlockNumber, - contract: &AliveContractInfo, -) -> Verdict { - // How much block has passed since the last deduction for the contract. - let blocks_passed = { - // Calculate an effective block number, i.e. after adjusting for handicap. - let effective_block_number = current_block_number.saturating_sub(handicap); - effective_block_number.saturating_sub(contract.deduct_block) - }; - if blocks_passed.is_zero() { - // Rent has already been paid - return Verdict::Exempt; +impl Rent +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, + E: Executable, +{ + /// Returns a fee charged per block from the contract. + /// + /// This function accounts for the storage rent deposit. I.e. if the contract possesses enough funds + /// then the fee can drop to zero. + fn compute_fee_per_block( + free_balance: &BalanceOf, + contract: &AliveContractInfo, + code_size_share: u32, + ) -> BalanceOf { + let uncovered_by_balance = T::DepositPerStorageByte::get() + .saturating_mul(contract.storage_size.saturating_add(code_size_share).into()) + .saturating_add( + T::DepositPerStorageItem::get() + .saturating_mul(contract.pair_count.into()) + ) + .saturating_add(T::DepositPerContract::get()) + .saturating_sub(*free_balance); + T::RentFraction::get().mul_ceil(uncovered_by_balance) } - let total_balance = T::Currency::total_balance(account); - let free_balance = T::Currency::free_balance(account); + /// Returns amount of funds available to consume by rent mechanism. 
+ /// + /// Rent mechanism cannot consume more than `rent_allowance` set by the contract and it cannot make + /// the balance lower than [`subsistence_threshold`]. + /// + /// In case the total_balance is below the subsistence threshold, this function returns `None`. + fn rent_budget( + total_balance: &BalanceOf, + free_balance: &BalanceOf, + contract: &AliveContractInfo, + ) -> Option> { + let subsistence_threshold = Module::::subsistence_threshold(); + // Reserved balance contributes towards the subsistence threshold to stay consistent + // with the existential deposit where the reserved balance is also counted. + if *total_balance < subsistence_threshold { + return None; + } - // An amount of funds to charge per block for storage taken up by the contract. - let fee_per_block = compute_fee_per_block::(&free_balance, contract); - if fee_per_block.is_zero() { - // The rent deposit offset reduced the fee to 0. This means that the contract - // gets the rent for free. - return Verdict::Exempt; + // However, reserved balance cannot be charged so we need to use the free balance + // to calculate the actual budget (which can be 0). + let rent_allowed_to_charge = free_balance.saturating_sub(subsistence_threshold); + Some(>::min( + contract.rent_allowance, + rent_allowed_to_charge, + )) } - let rent_budget = match rent_budget::(&total_balance, &free_balance, contract) { - Some(rent_budget) => rent_budget, - None => { - // The contract's total balance is already below subsistence threshold. That - // indicates that the contract cannot afford to leave a tombstone. - // - // So cleanly wipe the contract. - return Verdict::Kill; - } - }; - - let dues = fee_per_block - .checked_mul(&blocks_passed.saturated_into::().into()) - .unwrap_or_else(|| >::max_value()); - let insufficient_rent = rent_budget < dues; - - // If the rent payment cannot be withdrawn due to locks on the account balance, then evict the - // account. - // - // NOTE: This seems problematic because it provides a way to tombstone an account while - // avoiding the last rent payment. In effect, someone could retroactively set rent_allowance - // for their contract to 0. - let dues_limited = dues.min(rent_budget); - let can_withdraw_rent = T::Currency::ensure_can_withdraw( - account, - dues_limited, - WithdrawReasons::FEE, - free_balance.saturating_sub(dues_limited), - ) - .is_ok(); - - if insufficient_rent || !can_withdraw_rent { - // The contract cannot afford the rent payment and has a balance above the subsistence - // threshold, so it leaves a tombstone. - let amount = if can_withdraw_rent { - Some(OutstandingAmount::new(dues_limited)) - } else { - None + /// Consider the case for rent payment of the given account and returns a `Verdict`. + /// + /// Use `handicap` in case you want to change the reference block number. (To get more details see + /// `try_eviction`.) + fn consider_case( + account: &T::AccountId, + current_block_number: T::BlockNumber, + handicap: T::BlockNumber, + contract: &AliveContractInfo, + code_size: u32, + ) -> Verdict { + // How many blocks have passed since the last deduction for the contract. + let blocks_passed = { + // Calculate an effective block number, i.e. after adjusting for handicap.
+ let effective_block_number = current_block_number.saturating_sub(handicap); + effective_block_number.saturating_sub(contract.deduct_block) }; - return Verdict::Evict { amount }; - } + if blocks_passed.is_zero() { + // Rent has already been paid + return Verdict::Exempt; + } - return Verdict::Charge { - // We choose to use `dues_limited` here instead of `dues` just to err on the safer side. - amount: OutstandingAmount::new(dues_limited), - }; -} + let total_balance = T::Currency::total_balance(account); + let free_balance = T::Currency::free_balance(account); -/// Enacts the given verdict and returns the updated `ContractInfo`. -/// -/// `alive_contract_info` should be from the same address as `account`. -fn enact_verdict( - account: &T::AccountId, - alive_contract_info: AliveContractInfo, - current_block_number: T::BlockNumber, - verdict: Verdict, -) -> Option> { - match verdict { - Verdict::Exempt => return Some(ContractInfo::Alive(alive_contract_info)), - Verdict::Kill => { - >::remove(account); - child::kill_storage( - &alive_contract_info.child_trie_info(), - ); - >::deposit_event(RawEvent::Evicted(account.clone(), false)); - None + // An amount of funds to charge per block for storage taken up by the contract. + let fee_per_block = Self::compute_fee_per_block(&free_balance, contract, code_size); + if fee_per_block.is_zero() { + // The rent deposit offset reduced the fee to 0. This means that the contract + // gets the rent for free. + return Verdict::Exempt; } - Verdict::Evict { amount } => { - if let Some(amount) = amount { - amount.withdraw(account); - } - // Note: this operation is heavy. - let child_storage_root = child::root( - &alive_contract_info.child_trie_info(), - ); + let rent_budget = match Self::rent_budget(&total_balance, &free_balance, contract) { + Some(rent_budget) => rent_budget, + None => { + // All functions that allow a contract to transfer balance enforce + // that the contract always stays above the subsistence threshold. + // We want the rent system to always leave a tombstone to prevent the + // accidental loss of a contract. Only `seal_terminate` can remove a + // contract without a tombstone. Therefore this case should never be + // hit. + debug::error!( + "Tombstoned a contract that is below the subsistence threshold: {:?}", + account + ); + 0u32.into() + } + }; - let tombstone = >::new( - &child_storage_root[..], - alive_contract_info.code_hash, - ); - let tombstone_info = ContractInfo::Tombstone(tombstone); - >::insert(account, &tombstone_info); + let dues = fee_per_block + .checked_mul(&blocks_passed.saturated_into::().into()) + .unwrap_or_else(|| >::max_value()); + let insufficient_rent = rent_budget < dues; + + // If the rent payment cannot be withdrawn due to locks on the account balance, then evict the + // account. + // + // NOTE: This seems problematic because it provides a way to tombstone an account while + // avoiding the last rent payment. In effect, someone could retroactively set rent_allowance + // for their contract to 0. + let dues_limited = dues.min(rent_budget); + let can_withdraw_rent = T::Currency::ensure_can_withdraw( + account, + dues_limited, + WithdrawReasons::FEE, + free_balance.saturating_sub(dues_limited), + ) + .is_ok(); + + if insufficient_rent || !can_withdraw_rent { + // The contract cannot afford the rent payment and has a balance above the subsistence + // threshold, so it leaves a tombstone.
+ let amount = if can_withdraw_rent { + Some(OutstandingAmount::new(dues_limited)) + } else { + None + }; + return Verdict::Evict { amount }; + } - child::kill_storage( - &alive_contract_info.child_trie_info(), - ); + return Verdict::Charge { + // We choose to use `dues_limited` here instead of `dues` just to err on the safer side. + amount: OutstandingAmount::new(dues_limited), + }; + } - >::deposit_event(RawEvent::Evicted(account.clone(), true)); - Some(tombstone_info) - } - Verdict::Charge { amount } => { - let contract_info = ContractInfo::Alive(AliveContractInfo:: { - rent_allowance: alive_contract_info.rent_allowance - amount.peek(), - deduct_block: current_block_number, - ..alive_contract_info - }); - >::insert(account, &contract_info); - - amount.withdraw(account); - Some(contract_info) + /// Enacts the given verdict and returns the updated `ContractInfo`. + /// + /// `alive_contract_info` should be from the same address as `account`. + /// + /// # Note + /// + /// if `evictable_code` is `None` an `Evict` verdict will not be enacted. This is for + /// when calling this function during a `call` where access to the soon to be evicted + /// contract should be denied but storage should be left unmodified. + fn enact_verdict( + account: &T::AccountId, + alive_contract_info: AliveContractInfo, + current_block_number: T::BlockNumber, + verdict: Verdict, + evictable_code: Option>, + ) -> Result>, DispatchError> { + match (verdict, evictable_code) { + (Verdict::Exempt, _) => return Ok(Some(alive_contract_info)), + (Verdict::Evict { amount }, Some(code)) => { + // We need to remove the trie first because it is the only operation + // that can fail and this function is called without a storage + // transaction when called through `claim_surcharge`. + Storage::::queue_trie_for_deletion(&alive_contract_info)?; + + if let Some(amount) = amount { + amount.withdraw(account); + } + + // Note: this operation is heavy. + let child_storage_root = child::root( + &alive_contract_info.child_trie_info(), + ); + + let tombstone = >::new( + &child_storage_root[..], + alive_contract_info.code_hash, + ); + let tombstone_info = ContractInfo::Tombstone(tombstone); + >::insert(account, &tombstone_info); + code.drop_from_storage(); + >::deposit_event(RawEvent::Evicted(account.clone())); + Ok(None) + } + (Verdict::Evict { amount: _ }, None) => { + Ok(None) + } + (Verdict::Charge { amount }, _) => { + let contract = ContractInfo::Alive(AliveContractInfo:: { + rent_allowance: alive_contract_info.rent_allowance - amount.peek(), + deduct_block: current_block_number, + rent_payed: alive_contract_info.rent_payed.saturating_add(amount.peek()), + ..alive_contract_info + }); + >::insert(account, &contract); + amount.withdraw(account); + Ok(Some(contract.get_alive().expect("We just constructed it as alive. qed"))) + } } } -} -/// Make account paying the rent for the current block number -/// -/// NOTE this function performs eviction eagerly. All changes are read and written directly to -/// storage. 
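A simplified sketch of how the verdict above is reached, using plain integers instead of the runtime types and ignoring the balance-lock check that the real code performs:

// Simplified sketch of the rent verdict decision. It ignores withdraw locks
// and the reserved/free balance split handled by the real pallet code.
#[derive(Debug, PartialEq)]
enum VerdictSketch {
    Exempt,
    Evict { amount: u128 },
    Charge { amount: u128 },
}

fn consider_case_sketch(blocks_passed: u128, fee_per_block: u128, rent_budget: u128) -> VerdictSketch {
    if blocks_passed == 0 || fee_per_block == 0 {
        // Rent was already paid in this block, or the deposit fully covers it.
        return VerdictSketch::Exempt;
    }
    let dues = fee_per_block.saturating_mul(blocks_passed);
    // Never charge more than the rent budget allows.
    let dues_limited = dues.min(rent_budget);
    if dues > rent_budget {
        // The contract cannot afford the payment: evict and leave a tombstone,
        // still collecting whatever the budget allows.
        VerdictSketch::Evict { amount: dues_limited }
    } else {
        VerdictSketch::Charge { amount: dues_limited }
    }
}

fn main() {
    assert_eq!(consider_case_sketch(0, 8, 100), VerdictSketch::Exempt);
    assert_eq!(consider_case_sketch(5, 8, 100), VerdictSketch::Charge { amount: 40 });
    assert_eq!(consider_case_sketch(50, 8, 100), VerdictSketch::Evict { amount: 100 });
}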
-pub fn collect_rent(account: &T::AccountId) -> Option> { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return contract_info, - Some(ContractInfo::Alive(contract)) => contract, - }; - - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - Zero::zero(), - &alive_contract_info, - ); - enact_verdict(account, alive_contract_info, current_block_number, verdict) -} + /// Make account paying the rent for the current block number + /// + /// This functions does **not** evict the contract. It returns `None` in case the + /// contract is in need of eviction. [`try_eviction`] must + /// be called to perform the eviction. + pub fn charge( + account: &T::AccountId, + contract: AliveContractInfo, + code_size: u32, + ) -> Result>, DispatchError> { + let current_block_number = >::block_number(); + let verdict = Self::consider_case( + account, + current_block_number, + Zero::zero(), + &contract, + code_size, + ); + Self::enact_verdict(account, contract, current_block_number, verdict, None) + } -/// Process a report that a contract under the given address should be evicted. -/// -/// Enact the eviction right away if the contract should be evicted and return true. -/// Otherwise, **do nothing** and return false. -/// -/// The `handicap` parameter gives a way to check the rent to a moment in the past instead -/// of current block. E.g. if the contract is going to be evicted at the current block, -/// `handicap = 1` can defer the eviction for 1 block. This is useful to handicap certain snitchers -/// relative to others. -/// -/// NOTE this function performs eviction eagerly. All changes are read and written directly to -/// storage. -pub fn snitch_contract_should_be_evicted( - account: &T::AccountId, - handicap: T::BlockNumber, -) -> bool { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return false, - Some(ContractInfo::Alive(contract)) => contract, - }; - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - handicap, - &alive_contract_info, - ); - - // Enact the verdict only if the contract gets removed. - match verdict { - Verdict::Kill | Verdict::Evict { .. } => { - enact_verdict(account, alive_contract_info, current_block_number, verdict); - true + /// Process a report that a contract under the given address should be evicted. + /// + /// Enact the eviction right away if the contract should be evicted and return the amount + /// of rent that the contract payed over its lifetime. + /// Otherwise, **do nothing** and return None. + /// + /// The `handicap` parameter gives a way to check the rent to a moment in the past instead + /// of current block. E.g. if the contract is going to be evicted at the current block, + /// `handicap = 1` can defer the eviction for 1 block. This is useful to handicap certain snitchers + /// relative to others. + /// + /// NOTE this function performs eviction eagerly. All changes are read and written directly to + /// storage. 
+ pub fn try_eviction( + account: &T::AccountId, + handicap: T::BlockNumber, + ) -> Result>, DispatchError> { + let contract = >::get(account); + let contract = match contract { + None | Some(ContractInfo::Tombstone(_)) => return Ok(None), + Some(ContractInfo::Alive(contract)) => contract, + }; + let module = PrefabWasmModule::::from_storage_noinstr(contract.code_hash)?; + let current_block_number = >::block_number(); + let verdict = Self::consider_case( + account, + current_block_number, + handicap, + &contract, + module.occupied_storage(), + ); + + // Enact the verdict only if the contract gets removed. + match verdict { + Verdict::Evict { ref amount } => { + // The outstanding `amount` is withdrawn inside `enact_verdict`. + let rent_payed = amount + .as_ref() + .map(|a| a.peek()) + .unwrap_or_else(|| >::zero()) + .saturating_add(contract.rent_payed); + Self::enact_verdict( + account, contract, current_block_number, verdict, Some(module), + )?; + Ok(Some(rent_payed)) + } + _ => Ok(None), } - _ => false, } -} -/// Returns the projected time a given contract will be able to sustain paying its rent. The -/// returned projection is relevant for the current block, i.e. it is as if the contract was -/// accessed at the beginning of the current block. Returns `None` in case if the contract was -/// evicted before or as a result of the rent collection. -/// -/// The returned value is only an estimation. It doesn't take into account any top ups, changing the -/// rent allowance, or any problems coming from withdrawing the dues. -/// -/// NOTE that this is not a side-effect free function! It will actually collect rent and then -/// compute the projection. This function is only used for implementation of an RPC method through -/// `RuntimeApi` meaning that the changes will be discarded anyway. -pub fn compute_rent_projection( - account: &T::AccountId, -) -> RentProjectionResult { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), - Some(ContractInfo::Alive(contract)) => contract, - }; - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - Zero::zero(), - &alive_contract_info, - ); - let new_contract_info = - enact_verdict(account, alive_contract_info, current_block_number, verdict); - - // Check what happened after enaction of the verdict. - let alive_contract_info = match new_contract_info { - None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), - Some(ContractInfo::Alive(contract)) => contract, - }; - - // Compute how much would the fee per block be with the *updated* balance. - let total_balance = T::Currency::total_balance(account); - let free_balance = T::Currency::free_balance(account); - let fee_per_block = compute_fee_per_block::(&free_balance, &alive_contract_info); - if fee_per_block.is_zero() { - return Ok(RentProjection::NoEviction); - } + /// Returns the projected time a given contract will be able to sustain paying its rent. The + /// returned projection is relevant for the current block, i.e. it is as if the contract was + /// accessed at the beginning of the current block. Returns `None` in case if the contract was + /// evicted before or as a result of the rent collection. + /// + /// The returned value is only an estimation. It doesn't take into account any top ups, changing the + /// rent allowance, or any problems coming from withdrawing the dues. 
+ /// + /// NOTE that this is not a side-effect free function! It will actually collect rent and then + /// compute the projection. This function is only used for implementation of an RPC method through + /// `RuntimeApi` meaning that the changes will be discarded anyway. + pub fn compute_projection( + account: &T::AccountId, + ) -> RentProjectionResult { + use ContractAccessError::IsTombstone; + + let contract_info = >::get(account); + let alive_contract_info = match contract_info { + None | Some(ContractInfo::Tombstone(_)) => return Err(IsTombstone), + Some(ContractInfo::Alive(contract)) => contract, + }; + let module = PrefabWasmModule::from_storage_noinstr(alive_contract_info.code_hash) + .map_err(|_| IsTombstone)?; + let code_size = module.occupied_storage(); + let current_block_number = >::block_number(); + let verdict = Self::consider_case( + account, + current_block_number, + Zero::zero(), + &alive_contract_info, + code_size, + ); + let new_contract_info = Self::enact_verdict( + account, alive_contract_info, current_block_number, verdict, Some(module), + ); + + // Check what happened after enaction of the verdict. + let alive_contract_info = match new_contract_info.map_err(|_| IsTombstone)? { + None => return Err(IsTombstone), + Some(contract) => contract, + }; - // Then compute how much the contract will sustain under these circumstances. - let rent_budget = rent_budget::(&total_balance, &free_balance, &alive_contract_info).expect( - "the contract exists and in the alive state; - the updated balance must be greater than subsistence deposit; - this function doesn't return `None`; - qed - ", - ); - let blocks_left = match rent_budget.checked_div(&fee_per_block) { - Some(blocks_left) => blocks_left, - None => { - // `fee_per_block` is not zero here, so `checked_div` can return `None` if - // there is an overflow. This cannot happen with integers though. Return - // `NoEviction` here just in case. + // Compute how much would the fee per block be with the *updated* balance. + let total_balance = T::Currency::total_balance(account); + let free_balance = T::Currency::free_balance(account); + let fee_per_block = Self::compute_fee_per_block( + &free_balance, &alive_contract_info, code_size, + ); + if fee_per_block.is_zero() { return Ok(RentProjection::NoEviction); } - }; - let blocks_left = blocks_left.saturated_into::().into(); - Ok(RentProjection::EvictionAt( - current_block_number + blocks_left, - )) -} + // Then compute how much the contract will sustain under these circumstances. + let rent_budget = Self::rent_budget(&total_balance, &free_balance, &alive_contract_info).expect( + "the contract exists and in the alive state; + the updated balance must be greater than subsistence deposit; + this function doesn't return `None`; + qed + ", + ); + let blocks_left = match rent_budget.checked_div(&fee_per_block) { + Some(blocks_left) => blocks_left, + None => { + // `fee_per_block` is not zero here, so `checked_div` can return `None` if + // there is an overflow. This cannot happen with integers though. Return + // `NoEviction` here just in case. + return Ok(RentProjection::NoEviction); + } + }; -/// Restores the destination account using the origin as prototype. -/// -/// The restoration will be performed iff: -/// - origin exists and is alive, -/// - the origin's storage is not written in the current block -/// - the restored account has tombstone -/// - the tombstone matches the hash of the origin storage root, and code hash. 
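As a back-of-the-envelope illustration of the projection computed above (all numbers hypothetical): the contract can keep paying for roughly rent_budget / fee_per_block more blocks.

// Sketch of the `EvictionAt` projection with plain integers.
fn eviction_at_sketch(current_block: u64, rent_budget: u128, fee_per_block: u128) -> Option<u64> {
    if fee_per_block == 0 {
        // Fully covered by the deposit: no eviction on the horizon.
        return None;
    }
    let blocks_left = (rent_budget / fee_per_block) as u64;
    Some(current_block + blocks_left)
}

fn main() {
    // With a budget of 100 and a fee of 8 per block, eviction is projected
    // 12 blocks from now (integer division rounds down).
    assert_eq!(eviction_at_sketch(1_000, 100, 8), Some(1_012));
    assert_eq!(eviction_at_sketch(1_000, 100, 0), None);
}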
-/// -/// Upon succesful restoration, `origin` will be destroyed, all its funds are transferred to -/// the restored account. The restored account will inherit the last write block and its last -/// deduct block will be set to the current block. -pub fn restore_to( - origin: T::AccountId, - dest: T::AccountId, - code_hash: CodeHash, - rent_allowance: BalanceOf, - delta: Vec, -) -> Result<(), &'static str> { - let mut origin_contract = >::get(&origin) - .and_then(|c| c.get_alive()) - .ok_or("Cannot restore from inexisting or tombstone contract")?; - - let child_trie_info = origin_contract.child_trie_info(); - - let current_block = >::block_number(); - - if origin_contract.last_write == Some(current_block) { - return Err("Origin TrieId written in the current block"); + let blocks_left = blocks_left.saturated_into::().into(); + Ok(RentProjection::EvictionAt( + current_block_number + blocks_left, + )) } - let dest_tombstone = >::get(&dest) - .and_then(|c| c.get_tombstone()) - .ok_or("Cannot restore to inexisting or alive contract")?; - - let last_write = if !delta.is_empty() { - Some(current_block) - } else { - origin_contract.last_write - }; - - let key_values_taken = delta.iter() - .filter_map(|key| { - child::get_raw(&child_trie_info, &blake2_256(key)).map(|value| { - child::kill(&child_trie_info, &blake2_256(key)); - (key, value) + /// Restores the destination account using the origin as prototype. + /// + /// The restoration will be performed iff: + /// - the supplied code_hash does still exist on-chain + /// - origin exists and is alive, + /// - the origin's storage is not written in the current block + /// - the restored account has tombstone + /// - the tombstone matches the hash of the origin storage root, and code hash. + /// + /// Upon succesful restoration, `origin` will be destroyed, all its funds are transferred to + /// the restored account. The restored account will inherit the last write block and its last + /// deduct block will be set to the current block. + pub fn restore_to( + origin: T::AccountId, + dest: T::AccountId, + code_hash: CodeHash, + rent_allowance: BalanceOf, + delta: Vec, + ) -> Result<(), DispatchError> { + let mut origin_contract = >::get(&origin) + .and_then(|c| c.get_alive()) + .ok_or(Error::::InvalidSourceContract)?; + + let child_trie_info = origin_contract.child_trie_info(); + + let current_block = >::block_number(); + + if origin_contract.last_write == Some(current_block) { + return Err(Error::::InvalidContractOrigin.into()); + } + + let dest_tombstone = >::get(&dest) + .and_then(|c| c.get_tombstone()) + .ok_or(Error::::InvalidDestinationContract)?; + + let last_write = if !delta.is_empty() { + Some(current_block) + } else { + origin_contract.last_write + }; + + // Fails if the code hash does not exist on chain + E::add_user(code_hash)?; + + // We are allowed to eagerly modify storage even though the function can + // fail later due to tombstones not matching. This is because the restoration + // is always called from a contract and therefore in a storage transaction. + // The failure of this function will lead to this transaction's rollback. + let bytes_taken: u32 = delta.iter() + .filter_map(|key| { + let key = blake2_256(key); + child::get_raw(&child_trie_info, &key).map(|value| { + child::kill(&child_trie_info, &key); + value.len() as u32 + }) }) - }) - .collect::>(); - - let tombstone = >::new( - // This operation is cheap enough because last_write (delta not included) - // is not this block as it has been checked earlier. 
- &child::root(&child_trie_info)[..], - code_hash, - ); - - if tombstone != dest_tombstone { - for (key, value) in key_values_taken { - child::put_raw(&child_trie_info, &blake2_256(key), &value); + .sum(); + + let tombstone = >::new( + // This operation is cheap enough because last_write (delta not included) + // is not this block as it has been checked earlier. + &child::root(&child_trie_info)[..], + code_hash, + ); + + if tombstone != dest_tombstone { + return Err(Error::::InvalidTombstone.into()); } - return Err("Tombstones don't match"); + origin_contract.storage_size -= bytes_taken; + + >::remove(&origin); + E::remove_user(origin_contract.code_hash); + >::insert(&dest, ContractInfo::Alive(AliveContractInfo:: { + trie_id: origin_contract.trie_id, + storage_size: origin_contract.storage_size, + pair_count: origin_contract.pair_count, + code_hash, + rent_allowance, + rent_payed: >::zero(), + deduct_block: current_block, + last_write, + })); + + let origin_free_balance = T::Currency::free_balance(&origin); + T::Currency::make_free_balance_be(&origin, >::zero()); + T::Currency::deposit_creating(&dest, origin_free_balance); + + Ok(()) } - - origin_contract.storage_size -= key_values_taken.iter() - .map(|(_, value)| value.len() as u32) - .sum::(); - - >::remove(&origin); - >::insert(&dest, ContractInfo::Alive(AliveContractInfo:: { - trie_id: origin_contract.trie_id, - storage_size: origin_contract.storage_size, - empty_pair_count: origin_contract.empty_pair_count, - total_pair_count: origin_contract.total_pair_count, - code_hash, - rent_allowance, - deduct_block: current_block, - last_write, - })); - - let origin_free_balance = T::Currency::free_balance(&origin); - T::Currency::make_free_balance_be(&origin, >::zero()); - T::Currency::deposit_creating(&dest, origin_free_balance); - - Ok(()) } diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index fb38b1b895d18..3580fa2aae209 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -1,93 +1,204 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! This module contains the cost schedule and supporting code that constructs a //! sane default schedule from a `WeightInfo` implementation. 
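The default schedule constructed in this file derives each instruction weight from batched benchmark results, subtracting the cost of the supporting push/drop instructions. A rough sketch of that derivation, with invented benchmark numbers (the real figures come from the generated `WeightInfo`):

// Rough sketch of deriving one instruction weight from a batched benchmark:
// the benchmark executes the instruction INSTR_BENCHMARK_BATCH_SIZE times and
// each supporting value push/drop is priced as half an i64.const.
const BATCH_SIZE: u64 = 1_000;

fn instr_weight(benchmarked_batch_weight: u64, i64const_weight: u64, num_supporting: u64) -> u64 {
    let per_instr = benchmarked_batch_weight / BATCH_SIZE;
    per_instr.saturating_sub((i64const_weight / 2).saturating_mul(num_supporting))
}

fn main() {
    // Hypothetical: the i64.add batch costs 9_000_000 weight, i64.const costs 1_500,
    // and i64.add uses 3 supporting values (two pushed operands, one dropped result).
    println!("{}", instr_weight(9_000_000, 1_500, 3)); // 9_000 - 2_250 = 6_750
}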
-use crate::{Trait, WeightInfo}; +use crate::{Config, weights::WeightInfo}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; +use pallet_contracts_proc_macro::{ScheduleDebug, WeightDebug}; use frame_support::weights::Weight; -use sp_std::{marker::PhantomData, fmt}; +use sp_std::{marker::PhantomData, vec::Vec}; use codec::{Encode, Decode}; +use parity_wasm::elements; +use pwasm_utils::rules; +use sp_runtime::RuntimeDebug; /// How many API calls are executed in a single batch. The reason for increasing the amount /// of API calls in batches (per benchmark component increase) is so that the linear regression /// has an easier time determining the contribution of that component. pub const API_BENCHMARK_BATCH_SIZE: u32 = 100; +/// How many instructions are executed in a single batch. The reasoning is the same +/// as for `API_BENCHMARK_BATCH_SIZE`. +pub const INSTR_BENCHMARK_BATCH_SIZE: u32 = 1_000; + /// Definition of the cost schedule and other parameterizations for wasm vm. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[derive(Clone, Encode, Decode, PartialEq, Eq)] -pub struct Schedule { +#[cfg_attr(feature = "std", serde(bound(serialize = "", deserialize = "")))] +#[derive(Clone, Encode, Decode, PartialEq, Eq, ScheduleDebug)] +pub struct Schedule { /// Version of the schedule. pub version: u32, - /// The weights for individual wasm instructions. - pub instruction_weights: InstructionWeights, - - /// The weights for each imported function a contract is allowed to call. - pub host_fn_weights: HostFnWeights, - /// Whether the `seal_println` function is allowed to be used contracts. /// MUST only be enabled for `dev` chains, NOT for production chains pub enable_println: bool, + /// Describes the upper limits on various metrics. + pub limits: Limits, + + /// The weights for individual wasm instructions. + pub instruction_weights: InstructionWeights, + + /// The weights for each imported function a contract is allowed to call. + pub host_fn_weights: HostFnWeights, +} + +/// Describes the upper limits on various metrics. +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)] +pub struct Limits { /// The maximum number of topics supported by an event. - pub max_event_topics: u32, + pub event_topics: u32, + + /// Maximum allowed stack height in number of elements. + /// + /// See to find out + /// how the stack frame cost is calculated. Each element can be of one of the + /// wasm value types. This means the maximum size per element is 64bit. + pub stack_height: u32, - /// Maximum allowed stack height. + /// Maximum number of globals a module is allowed to declare. /// - /// See https://wiki.parity.io/WebAssembly-StackHeight to find out - /// how the stack frame cost is calculated. - pub max_stack_height: u32, + /// Globals are not limited through the `stack_height` as locals are. Neither does + /// the linear memory limit `memory_pages` applies to them. + pub globals: u32, + + /// Maximum numbers of parameters a function can have. + /// + /// Those need to be limited to prevent a potentially exploitable interaction with + /// the stack height instrumentation: The costs of executing the stack height + /// instrumentation for an indirectly called function scales linearly with the amount + /// of parameters of this function. 
Because the stack height instrumentation itself is + /// not weight metered, its costs must be static (via this limit) and included in + /// the costs of the instructions that cause them (call, call_indirect). + pub parameters: u32, /// Maximum number of memory pages allowed for a contract. - pub max_memory_pages: u32, + pub memory_pages: u32, - /// Maximum allowed size of a declared table. - pub max_table_size: u32, + /// Maximum number of elements allowed in a table. + /// + /// Currently, the only type of element that is allowed in a table is funcref. + pub table_size: u32, - /// The maximum length of a subject used for PRNG generation. - pub max_subject_len: u32, + /// Maximum number of elements that can appear as immediate value to the br_table instruction. + pub br_table_size: u32, + + /// The maximum length of a subject in bytes used for PRNG generation. + pub subject_len: u32, /// The maximum length of a contract code in bytes. This limit applies to the uninstrumented - /// and pristine form of the code as supplied to `put_code`. - pub max_code_size: u32, + /// and pristine form of the code as supplied to `instantiate_with_code`. + pub code_size: u32, +} - /// The type parameter is used in the default implementation. - pub _phantom: PhantomData, +impl Limits { + /// The maximum memory size in bytes that a contract can occupy. + pub fn max_memory_size(&self) -> u32 { + self.memory_pages * 64 * 1024 + } } /// Describes the weight for all categories of supported wasm instructions. +/// +/// There is one field for each wasm instruction that describes the weight to +/// execute one instruction of that name. There are a few exceptions: +/// +/// 1. If there is an i64 and an i32 variant of an instruction we use the weight +/// of the former for both. +/// 2. The following instructions are free of charge because they merely structure the +/// wasm module and cannot be spammed without making the module invalid (and rejected): +/// End, Unreachable, Return, Else +/// 3. The following instructions cannot be benchmarked because they are removed by any +/// real world execution engine as a preprocessing step and therefore don't yield a +/// meaningful benchmark result. However, in contrast to the instructions mentioned +/// in 2. they can be spammed. We price them with the same weight as the "default" +/// instruction (i64.const): Block, Loop, Nop +/// 4. We price both i64.const and drop as InstructionWeights.i64const / 2. The reason +/// for that is that we cannot benchmark either of them on its own but we need their +/// individual values to derive (by subtraction) the weight of all other instructions +/// that use them as supporting instructions. Supporting means mainly pushing arguments +/// and dropping return values in order to maintain a valid module. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[derive(Clone, Encode, Decode, PartialEq, Eq)] -pub struct InstructionWeights { - /// Weight of a growing memory by single page. - pub grow_mem: Weight, - - /// Weight of a regular operation.
- pub regular: Weight, +#[derive(Clone, Encode, Decode, PartialEq, Eq, WeightDebug)] +pub struct InstructionWeights { + pub i64const: u32, + pub i64load: u32, + pub i64store: u32, + pub select: u32, + pub r#if: u32, + pub br: u32, + pub br_if: u32, + pub br_table: u32, + pub br_table_per_entry: u32, + pub call: u32, + pub call_indirect: u32, + pub call_indirect_per_param: u32, + pub local_get: u32, + pub local_set: u32, + pub local_tee: u32, + pub global_get: u32, + pub global_set: u32, + pub memory_current: u32, + pub memory_grow: u32, + pub i64clz: u32, + pub i64ctz: u32, + pub i64popcnt: u32, + pub i64eqz: u32, + pub i64extendsi32: u32, + pub i64extendui32: u32, + pub i32wrapi64: u32, + pub i64eq: u32, + pub i64ne: u32, + pub i64lts: u32, + pub i64ltu: u32, + pub i64gts: u32, + pub i64gtu: u32, + pub i64les: u32, + pub i64leu: u32, + pub i64ges: u32, + pub i64geu: u32, + pub i64add: u32, + pub i64sub: u32, + pub i64mul: u32, + pub i64divs: u32, + pub i64divu: u32, + pub i64rems: u32, + pub i64remu: u32, + pub i64and: u32, + pub i64or: u32, + pub i64xor: u32, + pub i64shl: u32, + pub i64shrs: u32, + pub i64shru: u32, + pub i64rotl: u32, + pub i64rotr: u32, + /// The type parameter is used in the default implementation. + pub _phantom: PhantomData, } /// Describes the weight for each imported function that a contract is allowed to call. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[derive(Clone, Encode, Decode, PartialEq, Eq)] -pub struct HostFnWeights { +#[derive(Clone, Encode, Decode, PartialEq, Eq, WeightDebug)] +pub struct HostFnWeights { /// Weight of calling `seal_caller`. pub caller: Weight, @@ -199,6 +310,9 @@ pub struct HostFnWeights { /// Weight per output byte received through `seal_instantiate`. pub instantiate_per_output_byte: Weight, + /// Weight per salt byte supplied to `seal_instantiate`. + pub instantiate_per_salt_byte: Weight, + /// Weight of calling `seal_hash_sha_256`. pub hash_sha2_256: Weight, @@ -222,21 +336,11 @@ pub struct HostFnWeights { /// Weight per byte hashed by `seal_hash_blake2_128`. pub hash_blake2_128_per_byte: Weight, -} -/// We need to implement Debug manually because the automatic derive enforces T -/// to also implement Debug. -impl fmt::Debug for Schedule { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Schedule").finish() - } + /// The type parameter is used in the default implementation. + pub _phantom: PhantomData } -/// 500 (2 instructions per nano second on 2GHZ) * 1000x slowdown through wasmi -/// This is a wild guess and should be viewed as a rough estimation. -/// Proper benchmarks are needed before this value and its derivatives can be used in production. -const WASM_INSTRUCTION_COST: Weight = 500_000; - macro_rules! replace_token { ($_in:tt $replacement:tt) => { $replacement }; } @@ -259,6 +363,25 @@ macro_rules! cost_batched_args { } } +macro_rules! cost_instr_no_params_with_batch_size { + ($name:ident, $batch_size:expr) => { + (cost_args!($name, 1) / Weight::from($batch_size)) as u32 + } +} + +macro_rules! cost_instr_with_batch_size { + ($name:ident, $num_params:expr, $batch_size:expr) => { + cost_instr_no_params_with_batch_size!($name, $batch_size) + .saturating_sub((cost_instr_no_params_with_batch_size!(instr_i64const, $batch_size) / 2).saturating_mul($num_params)) + } +} + +macro_rules! cost_instr { + ($name:ident, $num_params:expr) => { + cost_instr_with_batch_size!($name, $num_params, INSTR_BENCHMARK_BATCH_SIZE) + } +} + macro_rules! 
cost_byte_args { ($name:ident, $( $arg: expr ),+) => { cost_args!($name, $( $arg ),+) / 1024 @@ -295,14 +418,99 @@ macro_rules! cost_byte_batched { } } -impl Default for Schedule { +impl Default for Schedule { fn default() -> Self { - let instruction_weights = InstructionWeights { - grow_mem: WASM_INSTRUCTION_COST, - regular: WASM_INSTRUCTION_COST, - }; + Self { + version: 0, + enable_println: false, + limits: Default::default(), + instruction_weights: Default::default(), + host_fn_weights: Default::default(), + } + } +} - let host_fn_weights = HostFnWeights { +impl Default for Limits { + fn default() -> Self { + Self { + event_topics: 4, + // 512 * sizeof(i64) will give us a 4k stack. + stack_height: 512, + globals: 256, + parameters: 128, + memory_pages: 16, + // 4k function pointers (This is in count not bytes). + table_size: 4096, + br_table_size: 256, + subject_len: 32, + code_size: 512 * 1024, + } + } +} + +impl Default for InstructionWeights { + fn default() -> Self { + let max_pages = Limits::default().memory_pages; + Self { + i64const: cost_instr!(instr_i64const, 1), + i64load: cost_instr!(instr_i64load, 2), + i64store: cost_instr!(instr_i64store, 2), + select: cost_instr!(instr_select, 4), + r#if: cost_instr!(instr_if, 3), + br: cost_instr!(instr_br, 2), + br_if: cost_instr!(instr_br_if, 5), + br_table: cost_instr!(instr_br_table, 3), + br_table_per_entry: cost_instr!(instr_br_table_per_entry, 0), + call: cost_instr!(instr_call, 2), + call_indirect: cost_instr!(instr_call_indirect, 3), + call_indirect_per_param: cost_instr!(instr_call_indirect_per_param, 1), + local_get: cost_instr!(instr_local_get, 1), + local_set: cost_instr!(instr_local_set, 1), + local_tee: cost_instr!(instr_local_tee, 2), + global_get: cost_instr!(instr_global_get, 1), + global_set: cost_instr!(instr_global_set, 1), + memory_current: cost_instr!(instr_memory_current, 1), + memory_grow: cost_instr_with_batch_size!(instr_memory_grow, 1, max_pages), + i64clz: cost_instr!(instr_i64clz, 2), + i64ctz: cost_instr!(instr_i64ctz, 2), + i64popcnt: cost_instr!(instr_i64popcnt, 2), + i64eqz: cost_instr!(instr_i64eqz, 2), + i64extendsi32: cost_instr!(instr_i64extendsi32, 2), + i64extendui32: cost_instr!(instr_i64extendui32, 2), + i32wrapi64: cost_instr!(instr_i32wrapi64, 2), + i64eq: cost_instr!(instr_i64eq, 3), + i64ne: cost_instr!(instr_i64ne, 3), + i64lts: cost_instr!(instr_i64lts, 3), + i64ltu: cost_instr!(instr_i64ltu, 3), + i64gts: cost_instr!(instr_i64gts, 3), + i64gtu: cost_instr!(instr_i64gtu, 3), + i64les: cost_instr!(instr_i64les, 3), + i64leu: cost_instr!(instr_i64leu, 3), + i64ges: cost_instr!(instr_i64ges, 3), + i64geu: cost_instr!(instr_i64geu, 3), + i64add: cost_instr!(instr_i64add, 3), + i64sub: cost_instr!(instr_i64sub, 3), + i64mul: cost_instr!(instr_i64mul, 3), + i64divs: cost_instr!(instr_i64divs, 3), + i64divu: cost_instr!(instr_i64divu, 3), + i64rems: cost_instr!(instr_i64rems, 3), + i64remu: cost_instr!(instr_i64remu, 3), + i64and: cost_instr!(instr_i64and, 3), + i64or: cost_instr!(instr_i64or, 3), + i64xor: cost_instr!(instr_i64xor, 3), + i64shl: cost_instr!(instr_i64shl, 3), + i64shrs: cost_instr!(instr_i64shrs, 3), + i64shru: cost_instr!(instr_i64shru, 3), + i64rotl: cost_instr!(instr_i64rotl, 3), + i64rotr: cost_instr!(instr_i64rotr, 3), + _phantom: PhantomData, + } + } +} + +impl Default for HostFnWeights { + fn default() -> Self { + Self { caller: cost_batched!(seal_caller), address: cost_batched!(seal_address), gas_left: cost_batched!(seal_gas_left), @@ -338,8 +546,9 @@ impl Default for 
Schedule { call_per_input_byte: cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 1, 0), call_per_output_byte: cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 0, 1), instantiate: cost_batched!(seal_instantiate), - instantiate_per_input_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_kb, 1, 0), - instantiate_per_output_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_kb, 0, 1), + instantiate_per_input_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 1, 0, 0), + instantiate_per_output_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 0, 1, 0), + instantiate_per_salt_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 0, 0, 1), hash_sha2_256: cost_batched!(seal_hash_sha2_256), hash_sha2_256_per_byte: cost_byte_batched!(seal_hash_sha2_256_per_kb), hash_keccak_256: cost_batched!(seal_hash_keccak_256), @@ -348,20 +557,120 @@ impl Default for Schedule { hash_blake2_256_per_byte: cost_byte_batched!(seal_hash_blake2_256_per_kb), hash_blake2_128: cost_batched!(seal_hash_blake2_128), hash_blake2_128_per_byte: cost_byte_batched!(seal_hash_blake2_128_per_kb), - }; - - Self { - version: 0, - instruction_weights, - host_fn_weights, - enable_println: false, - max_event_topics: 4, - max_stack_height: 64 * 1024, - max_memory_pages: 16, - max_table_size: 16 * 1024, - max_subject_len: 32, - max_code_size: 512 * 1024, _phantom: PhantomData, } } } + +struct ScheduleRules<'a, T: Config> { + schedule: &'a Schedule, + params: Vec, +} + +impl Schedule { + pub fn rules(&self, module: &elements::Module) -> impl rules::Rules + '_ { + ScheduleRules { + schedule: &self, + params: module + .type_section() + .iter() + .flat_map(|section| section.types()) + .map(|func| { + let elements::Type::Function(func) = func; + func.params().len() as u32 + }) + .collect() + } + } +} + +impl<'a, T: Config> rules::Rules for ScheduleRules<'a, T> { + fn instruction_cost(&self, instruction: &elements::Instruction) -> Option { + use parity_wasm::elements::Instruction::*; + let w = &self.schedule.instruction_weights; + let max_params = self.schedule.limits.parameters; + + let weight = match *instruction { + End | Unreachable | Return | Else => 0, + I32Const(_) | I64Const(_) | Block(_) | Loop(_) | Nop | Drop => w.i64const, + I32Load(_, _) | I32Load8S(_, _) | I32Load8U(_, _) | I32Load16S(_, _) | + I32Load16U(_, _) | I64Load(_, _) | I64Load8S(_, _) | I64Load8U(_, _) | + I64Load16S(_, _) | I64Load16U(_, _) | I64Load32S(_, _) | I64Load32U(_, _) + => w.i64load, + I32Store(_, _) | I32Store8(_, _) | I32Store16(_, _) | I64Store(_, _) | + I64Store8(_, _) | I64Store16(_, _) | I64Store32(_, _) => w.i64store, + Select => w.select, + If(_) => w.r#if, + Br(_) => w.br, + BrIf(_) => w.br_if, + Call(_) => w.call, + GetLocal(_) => w.local_get, + SetLocal(_) => w.local_set, + TeeLocal(_) => w.local_tee, + GetGlobal(_) => w.global_get, + SetGlobal(_) => w.global_set, + CurrentMemory(_) => w.memory_current, + GrowMemory(_) => w.memory_grow, + CallIndirect(idx, _) => *self.params.get(idx as usize).unwrap_or(&max_params), + BrTable(ref data) => + w.br_table.saturating_add( + w.br_table_per_entry.saturating_mul(data.table.len() as u32) + ), + I32Clz | I64Clz => w.i64clz, + I32Ctz | I64Ctz => w.i64ctz, + I32Popcnt | I64Popcnt => w.i64popcnt, + I32Eqz | I64Eqz => w.i64eqz, + I64ExtendSI32 => w.i64extendsi32, + I64ExtendUI32 => w.i64extendui32, + I32WrapI64 => w.i32wrapi64, + I32Eq | I64Eq => w.i64eq, + I32Ne | I64Ne => w.i64ne, + 
I32LtS | I64LtS => w.i64lts, + I32LtU | I64LtU => w.i64ltu, + I32GtS | I64GtS => w.i64gts, + I32GtU | I64GtU => w.i64gtu, + I32LeS | I64LeS => w.i64les, + I32LeU | I64LeU => w.i64leu, + I32GeS | I64GeS => w.i64ges, + I32GeU | I64GeU => w.i64geu, + I32Add | I64Add => w.i64add, + I32Sub | I64Sub => w.i64sub, + I32Mul | I64Mul => w.i64mul, + I32DivS | I64DivS => w.i64divs, + I32DivU | I64DivU => w.i64divu, + I32RemS | I64RemS => w.i64rems, + I32RemU | I64RemU => w.i64remu, + I32And | I64And => w.i64and, + I32Or | I64Or => w.i64or, + I32Xor | I64Xor => w.i64xor, + I32Shl | I64Shl => w.i64shl, + I32ShrS | I64ShrS => w.i64shrs, + I32ShrU | I64ShrU => w.i64shru, + I32Rotl | I64Rotl => w.i64rotl, + I32Rotr | I64Rotr => w.i64rotr, + + // Returning None makes the gas instrumentation fail which we intend for + // unsupported or unknown instructions. + _ => return None, + }; + Some(weight) + } + + fn memory_grow_cost(&self) -> Option { + // We benchmarked the memory.grow instruction with the maximum allowed pages. + // The cost for growing is therefore already included in the instruction cost. + None + } +} + +#[cfg(test)] +mod test { + use crate::tests::Test; + use super::*; + + #[test] + fn print_test_schedule() { + let schedule = Schedule::::default(); + println!("{:#?}", schedule); + } +} diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 3740952778fd3..2a2d5da225d61 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -1,196 +1,327 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! This module contains routines for accessing and altering a contract related state. 
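The storage routines below keep per-contract bookkeeping in sync with the child trie. A minimal sketch of that bookkeeping (with the runtime types elided, and plain arithmetic where the real `Storage::write` uses checked arithmetic and fails with `StorageExhausted` on overflow):

// Sketch of the bookkeeping `Storage::write` performs on the cached contract
// info when a key/value pair changes.
struct InfoSketch { storage_size: u32, pair_count: u32 }

fn apply_write(info: &mut InfoSketch, prev_len: Option<u32>, new_len: Option<u32>) {
    // A pair is created when there was no previous value, and removed when the
    // new value is `None`.
    match (prev_len, new_len) {
        (Some(_), None) => info.pair_count -= 1,
        (None, Some(_)) => info.pair_count += 1,
        _ => {}
    }
    // The stored byte count moves by the size difference of the value.
    info.storage_size = info.storage_size - prev_len.unwrap_or(0) + new_len.unwrap_or(0);
}

fn main() {
    let mut info = InfoSketch { storage_size: 0, pair_count: 0 };
    apply_write(&mut info, None, Some(100));     // create a 100 byte value
    apply_write(&mut info, Some(100), Some(40)); // overwrite it with 40 bytes
    apply_write(&mut info, Some(40), None);      // delete it again
    assert_eq!((info.storage_size, info.pair_count), (0, 0));
}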
use crate::{ exec::{AccountIdOf, StorageKey}, - AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieId, + AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Config, TrieId, + AccountCounter, DeletionQueue, Error, + weights::WeightInfo, }; +use codec::{Encode, Decode}; use sp_std::prelude::*; +use sp_std::marker::PhantomData; use sp_io::hashing::blake2_256; -use sp_runtime::traits::Bounded; -use frame_support::{storage::child, StorageMap}; +use sp_runtime::traits::{Bounded, Saturating, Zero}; +use sp_core::crypto::UncheckedFrom; +use frame_support::{ + dispatch::DispatchResult, + StorageMap, + debug, + storage::{child::{self, KillOutcome}, StorageValue}, + traits::Get, + weights::Weight, +}; /// An error that means that the account requested either doesn't exist or represents a tombstone /// account. #[cfg_attr(test, derive(PartialEq, Eq, Debug))] pub struct ContractAbsentError; -/// Reads a storage kv pair of a contract. -/// -/// The read is performed from the `trie_id` only. The `address` is not necessary. If the contract -/// doesn't store under the given `key` `None` is returned. -pub fn read_contract_storage(trie_id: &TrieId, key: &StorageKey) -> Option> { - child::get_raw(&crate::child_trie_info(&trie_id), &blake2_256(key)) +#[derive(Encode, Decode)] +pub struct DeletedContract { + pair_count: u32, + trie_id: TrieId, } -/// Update a storage entry into a contract's kv storage. -/// -/// If the `opt_new_value` is `None` then the kv pair is removed. -/// -/// This function also updates the bookkeeping info such as: number of total non-empty pairs a -/// contract owns, the last block the storage was written to, etc. That's why, in contrast to -/// `read_contract_storage`, this function also requires the `account` ID. -/// -/// If the contract specified by the id `account` doesn't exist `Err` is returned.` -pub fn write_contract_storage( - account: &AccountIdOf, - trie_id: &TrieId, - key: &StorageKey, - opt_new_value: Option>, -) -> Result<(), ContractAbsentError> { - let mut new_info = match >::get(account) { - Some(ContractInfo::Alive(alive)) => alive, - None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAbsentError), - }; - - let hashed_key = blake2_256(key); - let child_trie_info = &crate::child_trie_info(&trie_id); - - // In order to correctly update the book keeping we need to fetch the previous - // value of the key-value pair. - // - // It might be a bit more clean if we had an API that supported getting the size - // of the value without going through the loading of it. But at the moment of - // writing, there is no such API. - // - // That's not a show stopper in any case, since the performance cost is - // dominated by the trie traversal anyway. - let opt_prev_value = child::get_raw(&child_trie_info, &hashed_key); - - // Update the total number of KV pairs and the number of empty pairs. - match (&opt_prev_value, &opt_new_value) { - (Some(prev_value), None) => { - new_info.total_pair_count -= 1; - if prev_value.is_empty() { - new_info.empty_pair_count -= 1; - } - }, - (None, Some(new_value)) => { - new_info.total_pair_count += 1; - if new_value.is_empty() { - new_info.empty_pair_count += 1; - } - }, - (Some(prev_value), Some(new_value)) => { - if prev_value.is_empty() { - new_info.empty_pair_count -= 1; - } - if new_value.is_empty() { - new_info.empty_pair_count += 1; - } - } - (None, None) => {} - } +pub struct Storage(PhantomData); - // Update the total storage size. 
- let prev_value_len = opt_prev_value - .as_ref() - .map(|old_value| old_value.len() as u32) - .unwrap_or(0); - let new_value_len = opt_new_value - .as_ref() - .map(|new_value| new_value.len() as u32) - .unwrap_or(0); - new_info.storage_size = new_info - .storage_size - .saturating_add(new_value_len) - .saturating_sub(prev_value_len); - - new_info.last_write = Some(>::block_number()); - >::insert(&account, ContractInfo::Alive(new_info)); - - // Finally, perform the change on the storage. - match opt_new_value { - Some(new_value) => child::put_raw(&child_trie_info, &hashed_key, &new_value[..]), - None => child::kill(&child_trie_info, &hashed_key), +impl Storage +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + /// Reads a storage kv pair of a contract. + /// + /// The read is performed from the `trie_id` only. The `address` is not necessary. If the contract + /// doesn't store under the given `key` `None` is returned. + pub fn read(trie_id: &TrieId, key: &StorageKey) -> Option> { + child::get_raw(&crate::child_trie_info(&trie_id), &blake2_256(key)) } - Ok(()) -} + /// Update a storage entry into a contract's kv storage. + /// + /// If the `opt_new_value` is `None` then the kv pair is removed. + /// + /// This function also updates the bookkeeping info such as: number of total non-empty pairs a + /// contract owns, the last block the storage was written to, etc. That's why, in contrast to + /// `read`, this function also requires the `account` ID. + /// + /// If the contract specified by the id `account` doesn't exist `Err` is returned.` + /// + /// # Panics + /// + /// Panics iff the `account` specified is not alive and in storage. + pub fn write( + account: &AccountIdOf, + trie_id: &TrieId, + key: &StorageKey, + opt_new_value: Option>, + ) -> DispatchResult { + let mut new_info = match >::get(account) { + Some(ContractInfo::Alive(alive)) => alive, + None | Some(ContractInfo::Tombstone(_)) => panic!("Contract not found"), + }; -/// Returns the rent allowance set for the contract give by the account id. -pub fn rent_allowance( - account: &AccountIdOf, -) -> Result, ContractAbsentError> { - >::get(account) - .and_then(|i| i.as_alive().map(|i| i.rent_allowance)) - .ok_or(ContractAbsentError) -} + let hashed_key = blake2_256(key); + let child_trie_info = &crate::child_trie_info(&trie_id); -/// Set the rent allowance for the contract given by the account id. -/// -/// Returns `Err` if the contract doesn't exist or is a tombstone. -pub fn set_rent_allowance( - account: &AccountIdOf, - rent_allowance: BalanceOf, -) -> Result<(), ContractAbsentError> { - >::mutate(account, |maybe_contract_info| match maybe_contract_info { - Some(ContractInfo::Alive(ref mut alive_info)) => { - alive_info.rent_allowance = rent_allowance; - Ok(()) + let opt_prev_len = child::len(&child_trie_info, &hashed_key); + + // Update the total number of KV pairs and the number of empty pairs. + match (&opt_prev_len, &opt_new_value) { + (Some(_), None) => { + new_info.pair_count = new_info.pair_count.checked_sub(1) + .ok_or_else(|| Error::::StorageExhausted)?; + }, + (None, Some(_)) => { + new_info.pair_count = new_info.pair_count.checked_add(1) + .ok_or_else(|| Error::::StorageExhausted)?; + }, + (Some(_), Some(_)) => {}, + (None, None) => {}, } - _ => Err(ContractAbsentError), - }) -} -/// Returns the code hash of the contract specified by `account` ID. 
-#[cfg(test)] -pub fn code_hash(account: &AccountIdOf) -> Result, ContractAbsentError> { - >::get(account) - .and_then(|i| i.as_alive().map(|i| i.code_hash)) - .ok_or(ContractAbsentError) -} + // Update the total storage size. + let prev_value_len = opt_prev_len.unwrap_or(0); + let new_value_len = opt_new_value + .as_ref() + .map(|new_value| new_value.len() as u32) + .unwrap_or(0); + new_info.storage_size = new_info + .storage_size + .checked_sub(prev_value_len) + .and_then(|val| val.checked_add(new_value_len)) + .ok_or_else(|| Error::::StorageExhausted)?; -/// Creates a new contract descriptor in the storage with the given code hash at the given address. -/// -/// Returns `Err` if there is already a contract (or a tombstone) exists at the given address. -pub fn place_contract( - account: &AccountIdOf, - trie_id: TrieId, - ch: CodeHash, -) -> Result<(), &'static str> { - >::mutate(account, |maybe_contract_info| { - if maybe_contract_info.is_some() { - return Err("Alive contract or tombstone already exists"); + new_info.last_write = Some(>::block_number()); + >::insert(&account, ContractInfo::Alive(new_info)); + + // Finally, perform the change on the storage. + match opt_new_value { + Some(new_value) => child::put_raw(&child_trie_info, &hashed_key, &new_value[..]), + None => child::kill(&child_trie_info, &hashed_key), } - *maybe_contract_info = Some( - AliveContractInfo:: { + Ok(()) + } + + /// Returns the rent allowance set for the contract give by the account id. + pub fn rent_allowance( + account: &AccountIdOf, + ) -> Result, ContractAbsentError> + { + >::get(account) + .and_then(|i| i.as_alive().map(|i| i.rent_allowance)) + .ok_or(ContractAbsentError) + } + + /// Set the rent allowance for the contract given by the account id. + /// + /// Returns `Err` if the contract doesn't exist or is a tombstone. + pub fn set_rent_allowance( + account: &AccountIdOf, + rent_allowance: BalanceOf, + ) -> Result<(), ContractAbsentError> { + >::mutate(account, |maybe_contract_info| match maybe_contract_info { + Some(ContractInfo::Alive(ref mut alive_info)) => { + alive_info.rent_allowance = rent_allowance; + Ok(()) + } + _ => Err(ContractAbsentError), + }) + } + + /// Creates a new contract descriptor in the storage with the given code hash at the given address. + /// + /// Returns `Err` if there is already a contract (or a tombstone) exists at the given address. + pub fn place_contract( + account: &AccountIdOf, + trie_id: TrieId, + ch: CodeHash, + ) -> DispatchResult { + >::try_mutate(account, |existing| { + if existing.is_some() { + return Err(Error::::DuplicateContract.into()); + } + + let contract = AliveContractInfo:: { code_hash: ch, storage_size: 0, trie_id, - deduct_block: >::block_number(), + deduct_block: + // We want to charge rent for the first block in advance. Therefore we + // treat the contract as if it was created in the last block and then + // charge rent for it during instantiation. + >::block_number().saturating_sub(1u32.into()), rent_allowance: >::max_value(), - empty_pair_count: 0, - total_pair_count: 0, + rent_payed: >::zero(), + pair_count: 0, last_write: None, - } - .into(), + }; + + *existing = Some(contract.into()); + + Ok(()) + }) + } + + /// Push a contract's trie to the deletion queue for lazy removal. + /// + /// You must make sure that the contract is also removed or converted into a tombstone + /// when queuing the trie for deletion. 
+ pub fn queue_trie_for_deletion(contract: &AliveContractInfo) -> DispatchResult { + if DeletionQueue::decode_len().unwrap_or(0) >= T::DeletionQueueDepth::get() as usize { + Err(Error::::DeletionQueueFull.into()) + } else { + DeletionQueue::append(DeletedContract { + pair_count: contract.pair_count, + trie_id: contract.trie_id.clone(), + }); + Ok(()) + } + } + + /// Calculates the weight that is necessary to remove one key from the trie and how many + /// of those keys can be deleted from the deletion queue given the supplied queue length + /// and weight limit. + pub fn deletion_budget(queue_len: usize, weight_limit: Weight) -> (u64, u32) { + let base_weight = T::WeightInfo::on_initialize(); + let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) - + T::WeightInfo::on_initialize_per_queue_item(0); + let weight_per_key = T::WeightInfo::on_initialize_per_trie_key(1) - + T::WeightInfo::on_initialize_per_trie_key(0); + let decoding_weight = weight_per_queue_item.saturating_mul(queue_len as Weight); + + // `weight_per_key` being zero makes no sense and would constitute a failure to + // benchmark properly. We opt for not removing any keys at all in this case. + let key_budget = weight_limit + .saturating_sub(base_weight) + .saturating_sub(decoding_weight) + .checked_div(weight_per_key) + .unwrap_or(0) as u32; + + (weight_per_key, key_budget) + } + + /// Delete as many items from the deletion queue possible within the supplied weight limit. + /// + /// It returns the amount of weight used for that task or `None` when no weight was used + /// apart from the base weight. + pub fn process_deletion_queue_batch(weight_limit: Weight) -> Weight { + let queue_len = DeletionQueue::decode_len().unwrap_or(0); + if queue_len == 0 { + return weight_limit; + } + + let (weight_per_key, mut remaining_key_budget) = Self::deletion_budget( + queue_len, + weight_limit, ); - Ok(()) - }) -} + // We want to check whether we have enough weight to decode the queue before + // proceeding. Too little weight for decoding might happen during runtime upgrades + // which consume the whole block before the other `on_initialize` blocks are called. + if remaining_key_budget == 0 { + return weight_limit; + } + + let mut queue = DeletionQueue::get(); + + while !queue.is_empty() && remaining_key_budget > 0 { + // Cannot panic due to loop condition + let trie = &mut queue[0]; + let pair_count = trie.pair_count; + let outcome = child::kill_storage( + &crate::child_trie_info(&trie.trie_id), + Some(remaining_key_budget), + ); + if pair_count > remaining_key_budget { + // Cannot underflow because of the if condition + trie.pair_count -= remaining_key_budget; + } else { + // We do not care to preserve order. The contract is deleted already and + // noone waits for the trie to be deleted. + let removed = queue.swap_remove(0); + match outcome { + // This should not happen as our budget was large enough to remove all keys. + KillOutcome::SomeRemaining => { + debug::error!( + "After deletion keys are remaining in this child trie: {:?}", + removed.trie_id, + ); + }, + KillOutcome::AllRemoved => (), + } + } + remaining_key_budget = remaining_key_budget + .saturating_sub(remaining_key_budget.min(pair_count)); + } + + DeletionQueue::put(queue); + weight_limit.saturating_sub(weight_per_key.saturating_mul(remaining_key_budget as Weight)) + } + + /// This generator uses inner counter for account id and applies the hash over `AccountId + + /// accountid_counter`. 
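// Illustrative sketch, not part of the patch: the arithmetic inside
// `deletion_budget` above, with made-up numbers. The budget for trie keys is
// whatever weight is left after the base cost and the cost of decoding the
// queue, divided by the marginal weight of removing one key.
fn key_budget(
    weight_limit: u64,
    base_weight: u64,
    weight_per_queue_item: u64,
    weight_per_key: u64,
    queue_len: u64,
) -> u32 {
    weight_limit
        .saturating_sub(base_weight)
        .saturating_sub(weight_per_queue_item.saturating_mul(queue_len))
        .checked_div(weight_per_key)
        // A zero `weight_per_key` would indicate a broken benchmark, so no
        // keys are removed in that case, just like in the pallet code above.
        .unwrap_or(0) as u32
}
// Assumed numbers: a limit of 1_000_000, a base of 100_000, 10 queued tries
// costing 5_000 each to decode and 1_000 per removed key leave
// (1_000_000 - 100_000 - 50_000) / 1_000 = 850 keys for this block.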
+ pub fn generate_trie_id(account_id: &AccountIdOf) -> TrieId { + use sp_runtime::traits::Hash; + // Note that skipping a value due to error is not an issue here. + // We only need uniqueness, not sequence. + let new_seed = AccountCounter::mutate(|v| { + *v = v.wrapping_add(1); + *v + }); + + let buf: Vec<_> = account_id.as_ref().iter() + .chain(&new_seed.to_le_bytes()) + .cloned() + .collect(); + T::Hashing::hash(&buf).as_ref().into() + } -/// Removes the contract and all the storage associated with it. -/// -/// This function doesn't affect the account. -pub fn destroy_contract(address: &AccountIdOf, trie_id: &TrieId) { - >::remove(address); - child::kill_storage(&crate::child_trie_info(&trie_id)); + /// Returns the code hash of the contract specified by `account` ID. + #[cfg(test)] + pub fn code_hash(account: &AccountIdOf) -> Result, ContractAbsentError> + { + >::get(account) + .and_then(|i| i.as_alive().map(|i| i.code_hash)) + .ok_or(ContractAbsentError) + } + + /// Fill up the queue in order to exercise the limits during testing. + #[cfg(test)] + pub fn fill_queue_with_dummies() { + let queue: Vec<_> = (0..T::DeletionQueueDepth::get()).map(|_| DeletedContract { + pair_count: 0, + trie_id: vec![], + }) + .collect(); + DeletionQueue::put(queue); + } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index c2d9ed6642559..62768641ac165 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -1,94 +1,97 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
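// Illustrative sketch, not part of the patch: the uniqueness argument behind
// `Storage::generate_trie_id` above. The trie id is a hash over the account
// id concatenated with a counter that is bumped on every instantiation, so
// repeated instantiations by the same account still get distinct tries. The
// function name is made up and std's DefaultHasher stands in for `T::Hashing`.
fn sketch_trie_id(account: &[u8], counter: &mut u64) -> u64 {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    // Bump the counter first; wrapping is fine because only uniqueness of the
    // (account, counter) pair matters, not its ordering.
    *counter = counter.wrapping_add(1);
    let mut hasher = DefaultHasher::new();
    account.hash(&mut hasher);
    counter.hash(&mut hasher);
    hasher.finish()
}
// let mut c = 0u64;
// assert_ne!(sketch_trie_id(b"alice", &mut c), sketch_trie_id(b"alice", &mut c));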
use crate::{ - BalanceOf, ContractAddressFor, ContractInfo, ContractInfoOf, GenesisConfig, Module, - RawAliveContractInfo, RawEvent, Trait, TrieId, Schedule, TrieIdGenerator, gas::Gas, - Error, Config, RuntimeReturnCode, + BalanceOf, ContractInfo, ContractInfoOf, Module, + RawAliveContractInfo, RawEvent, Config, Schedule, gas::Gas, + Error, RuntimeReturnCode, storage::Storage, + chain_extension::{ + Result as ExtensionResult, Environment, ChainExtension, Ext, SysConfig, RetVal, + UncheckedFrom, InitState, ReturnFlags, + }, + exec::{AccountIdOf, Executable}, wasm::PrefabWasmModule, }; use assert_matches::assert_matches; -use hex_literal::*; use codec::Encode; use sp_runtime::{ - Perbill, traits::{BlakeTwo256, Hash, IdentityLookup, Convert}, testing::{Header, H256}, + AccountId32, Perbill, }; +use sp_io::hashing::blake2_256; use frame_support::{ - assert_ok, assert_err_ignore_postinfo, impl_outer_dispatch, impl_outer_event, - impl_outer_origin, parameter_types, StorageMap, StorageValue, - traits::{Currency, Get, ReservableCurrency}, - weights::{Weight, PostDispatchInfo}, + assert_ok, assert_err, assert_err_ignore_postinfo, + parameter_types, StorageMap, assert_storage_noop, + traits::{Currency, ReservableCurrency, OnInitialize}, + weights::{Weight, PostDispatchInfo, DispatchClass, constants::WEIGHT_PER_SECOND}, dispatch::DispatchErrorWithPostInfo, + storage::child, }; -use std::cell::RefCell; use frame_system::{self as system, EventRecord, Phase}; - -mod contracts { - // Re-export contents of the root. This basically - // needs to give a name for the current crate. - // This hack is required for `impl_outer_event!`. - pub use super::super::*; - pub use frame_support::impl_outer_event; -} - -use pallet_balances as balances; - -impl_outer_event! { - pub enum MetaEvent for Test { - system, - balances, - contracts, - } -} -impl_outer_origin! { - pub enum Origin for Test where system = frame_system { } -} -impl_outer_dispatch! 
{ - pub enum Call for Test where origin: Origin { - balances::Balances, - contracts::Contracts, +use pretty_assertions::assert_eq; + +use crate as pallet_contracts; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + Randomness: pallet_randomness_collective_flip::{Module, Call, Storage}, + Contracts: pallet_contracts::{Module, Call, Config, Storage, Event}, } -} +); #[macro_use] pub mod test_utils { use super::{Test, Balances}; - use crate::{ContractInfoOf, TrieIdGenerator, CodeHash}; - use crate::storage::{write_contract_storage, read_contract_storage}; - use crate::exec::StorageKey; + use crate::{ + ContractInfoOf, CodeHash, + storage::Storage, + exec::{StorageKey, AccountIdOf}, + Module as Contracts, + }; use frame_support::{StorageMap, traits::Currency}; - pub fn set_storage(addr: &u64, key: &StorageKey, value: Option>) { + pub fn set_storage(addr: &AccountIdOf, key: &StorageKey, value: Option>) { let contract_info = >::get(&addr).unwrap().get_alive().unwrap(); - write_contract_storage::(&1, &contract_info.trie_id, key, value).unwrap(); + Storage::::write(addr, &contract_info.trie_id, key, value).unwrap(); } - pub fn get_storage(addr: &u64, key: &StorageKey) -> Option> { + pub fn get_storage(addr: &AccountIdOf, key: &StorageKey) -> Option> { let contract_info = >::get(&addr).unwrap().get_alive().unwrap(); - read_contract_storage(&contract_info.trie_id, key) + Storage::::read(&contract_info.trie_id, key) } - pub fn place_contract(address: &u64, code_hash: CodeHash) { - let trie_id = ::TrieIdGenerator::trie_id(address); - crate::storage::place_contract::(&address, trie_id, code_hash).unwrap() + pub fn place_contract(address: &AccountIdOf, code_hash: CodeHash) { + let trie_id = Storage::::generate_trie_id(address); + set_balance(address, Contracts::::subsistence_threshold() * 10); + Storage::::place_contract(&address, trie_id, code_hash).unwrap(); } - pub fn set_balance(who: &u64, amount: u64) { + pub fn set_balance(who: &AccountIdOf, amount: u64) { let imbalance = Balances::deposit_creating(who, amount); drop(imbalance); } - pub fn get_balance(who: &u64) -> u64 { + pub fn get_balance(who: &AccountIdOf) -> u64 { Balances::free_balance(who) } macro_rules! assert_return_code { @@ -97,56 +100,130 @@ pub mod test_utils { assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32); }} } + macro_rules! assert_refcount { + ( $code_hash:expr , $should:expr $(,)? ) => {{ + let is = crate::CodeStorage::::get($code_hash) + .map(|m| m.refcount()) + .unwrap_or(0); + assert_eq!(is, $should); + }} + } } thread_local! 
{ - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); + static TEST_EXTENSION: sp_std::cell::RefCell = Default::default(); +} + +pub struct TestExtension { + enabled: bool, + last_seen_buffer: Vec, + last_seen_inputs: (u32, u32, u32, u32), +} + +impl TestExtension { + fn disable() { + TEST_EXTENSION.with(|e| e.borrow_mut().enabled = false) + } + + fn last_seen_buffer() -> Vec { + TEST_EXTENSION.with(|e| e.borrow().last_seen_buffer.clone()) + } + + fn last_seen_inputs() -> (u32, u32, u32, u32) { + TEST_EXTENSION.with(|e| e.borrow().last_seen_inputs.clone()) + } +} + +impl Default for TestExtension { + fn default() -> Self { + Self { + enabled: true, + last_seen_buffer: vec![], + last_seen_inputs: (0, 0, 0, 0), + } + } } -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } +impl ChainExtension for TestExtension { + fn call(func_id: u32, env: Environment) -> ExtensionResult + where + E: Ext, + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, + { + match func_id { + 0 => { + let mut env = env.buf_in_buf_out(); + let input = env.read(2)?; + env.write(&input, false, None)?; + TEST_EXTENSION.with(|e| e.borrow_mut().last_seen_buffer = input); + Ok(RetVal::Converging(func_id)) + }, + 1 => { + let env = env.only_in(); + TEST_EXTENSION.with(|e| + e.borrow_mut().last_seen_inputs = ( + env.val0(), env.val1(), env.val2(), env.val3() + ) + ); + Ok(RetVal::Converging(func_id)) + }, + 2 => { + let mut env = env.buf_in_buf_out(); + let weight = env.read(2)?[1].into(); + env.charge_weight(weight)?; + Ok(RetVal::Converging(func_id)) + }, + 3 => { + Ok(RetVal::Diverging{ + flags: ReturnFlags::REVERT, + data: vec![42, 99], + }) + }, + _ => { + panic!("Passed unknown func_id to test chain extension: {}", func_id); + } + } + } + + fn enabled() -> bool { + TEST_EXTENSION.with(|e| e.borrow().enabled) + } } -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); + pub static ExistentialDeposit: u64 = 0; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; type Call = Call; type Hashing = BlakeTwo256; - type AccountId = u64; + type AccountId = AccountId32; type Lookup = IdentityLookup; type Header = Header; - type Event = MetaEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; - type Event = MetaEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -155,7 +232,7 @@ impl pallet_balances::Trait for Test { parameter_types! { pub const MinimumPeriod: u64 = 1; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -164,12 +241,15 @@ impl pallet_timestamp::Trait for Test { parameter_types! { pub const SignedClaimHandicap: u64 = 2; pub const TombstoneDeposit: u64 = 16; - pub const StorageSizeOffset: u32 = 8; - pub const RentByteFee: u64 = 4; - pub const RentDepositOffset: u64 = 10_000; - pub const SurchargeReward: u64 = 150; + pub const DepositPerContract: u64 = 8 * DepositPerStorageByte::get(); + pub const DepositPerStorageByte: u64 = 10_000; + pub const DepositPerStorageItem: u64 = 10_000; + pub RentFraction: Perbill = Perbill::from_rational_approximation(4u32, 10_000u32); + pub const SurchargeReward: u64 = 500_000; pub const MaxDepth: u32 = 100; pub const MaxValueSize: u32 = 16_384; + pub const DeletionQueueDepth: u32 = 1024; + pub const DeletionWeightLimit: Weight = 500_000_000_000; } parameter_types! 
{ @@ -182,58 +262,32 @@ impl Convert> for Test { } } -impl Trait for Test { +impl Config for Test { type Time = Timestamp; type Randomness = Randomness; type Currency = Balances; - type DetermineContractAddress = DummyContractAddressFor; - type Event = MetaEvent; - type TrieIdGenerator = DummyTrieIdGenerator; + type Event = Event; type RentPayment = (); type SignedClaimHandicap = SignedClaimHandicap; type TombstoneDeposit = TombstoneDeposit; - type StorageSizeOffset = StorageSizeOffset; - type RentByteFee = RentByteFee; - type RentDepositOffset = RentDepositOffset; + type DepositPerContract = DepositPerContract; + type DepositPerStorageByte = DepositPerStorageByte; + type DepositPerStorageItem = DepositPerStorageItem; + type RentFraction = RentFraction; type SurchargeReward = SurchargeReward; type MaxDepth = MaxDepth; type MaxValueSize = MaxValueSize; type WeightPrice = Self; type WeightInfo = (); + type ChainExtension = TestExtension; + type DeletionQueueDepth = DeletionQueueDepth; + type DeletionWeightLimit = DeletionWeightLimit; } -type Balances = pallet_balances::Module; -type Timestamp = pallet_timestamp::Module; -type Contracts = Module; -type System = frame_system::Module; -type Randomness = pallet_randomness_collective_flip::Module; - -pub struct DummyContractAddressFor; -impl ContractAddressFor for DummyContractAddressFor { - fn contract_address_for(_code_hash: &H256, _data: &[u8], origin: &u64) -> u64 { - *origin + 1 - } -} - -pub struct DummyTrieIdGenerator; -impl TrieIdGenerator for DummyTrieIdGenerator { - fn trie_id(account_id: &u64) -> TrieId { - let new_seed = super::AccountCounter::mutate(|v| { - *v = v.wrapping_add(1); - *v - }); - - let mut res = vec![]; - res.extend_from_slice(&new_seed.to_le_bytes()); - res.extend_from_slice(&account_id.to_le_bytes()); - res - } -} - -const ALICE: u64 = 1; -const BOB: u64 = 2; -const CHARLIE: u64 = 3; -const DJANGO: u64 = 4; +pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); +pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); +pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); +pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); const GAS_LIMIT: Gas = 10_000_000_000; @@ -261,7 +315,7 @@ impl ExtBuilder { pallet_balances::GenesisConfig:: { balances: vec![], }.assimilate_storage(&mut t).unwrap(); - GenesisConfig { + pallet_contracts::GenesisConfig { current_schedule: Schedule:: { enable_println: true, ..Default::default() @@ -281,7 +335,7 @@ fn compile_module( fixture_name: &str, ) -> wat::Result<(Vec, ::Output)> where - T: frame_system::Trait, + T: frame_system::Config, { let fixture_path = ["fixtures/", fixture_name, ".wat"].concat(); let wasm_binary = wat::parse_file(fixture_path)?; @@ -291,12 +345,12 @@ where // Perform a call to a plain account. // The actual transfer fails because we can only call contracts. -// Then we check that no gas was used because the base costs for calling are either charged -// as part of the `call` extrinsic or by `seal_call`. +// Then we check that at least the base costs where charged (no runtime gas costs.) 
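// Illustrative sketch, not part of the patch: the rent arithmetic that the
// `deduct_blocks` test below asserts against, spelled out with concrete
// numbers. `code_len` is a made-up value here; the test reads it from the
// stored wasm module. With the mock parameters above
// (DepositPerStorageByte = DepositPerStorageItem = 10_000 and
// RentFraction = 4 / 10_000) the rent due per block is the rent fraction of
// whatever part of the required deposit is not covered by free balance.
fn rent_per_block(code_len: u128, free_balance: u128) -> u128 {
    // Contract base deposit (8) + 4 bytes of contract storage + code bytes,
    // all priced per byte, plus one storage item.
    let required_deposit = (8 + 4 + code_len) * 10_000 + 10_000;
    let uncovered = required_deposit.saturating_sub(free_balance);
    // Equivalent of `RentFraction::get().mul_ceil(uncovered)` for 4 / 10_000.
    (uncovered * 4 + 9_999) / 10_000
}
// An assumed code_len of 1_000 and an endowment of 100_000 give
// (8 + 4 + 1_000) * 10_000 + 10_000 - 100_000 = 10_030_000 uncovered and a
// rent of ceil(10_030_000 * 4 / 10_000) = 4_012 per block; one such block is
// charged immediately at instantiation because `place_contract` backdates the
// contract by one block.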
#[test] fn calling_plain_account_fails() { ExtBuilder::default().build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 100_000_000); + let base_cost = <::WeightInfo as crate::WeightInfo>::call(); assert_eq!( Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, Vec::new()), @@ -304,7 +358,7 @@ fn calling_plain_account_fails() { DispatchErrorWithPostInfo { error: Error::::NotCallable.into(), post_info: PostDispatchInfo { - actual_weight: Some(0), + actual_weight: Some(base_cost), pays_fee: Default::default(), }, } @@ -318,8 +372,8 @@ fn account_removal_does_not_remove_storage() { use self::test_utils::{set_storage, get_storage}; ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let trie_id1 = ::TrieIdGenerator::trie_id(&1); - let trie_id2 = ::TrieIdGenerator::trie_id(&2); + let trie_id1 = Storage::::generate_trie_id(&ALICE); + let trie_id2 = Storage::::generate_trie_id(&BOB); let key1 = &[1; 32]; let key2 = &[2; 32]; @@ -328,11 +382,11 @@ fn account_removal_does_not_remove_storage() { let alice_contract_info = ContractInfo::Alive(RawAliveContractInfo { trie_id: trie_id1.clone(), storage_size: 0, - empty_pair_count: 0, - total_pair_count: 0, + pair_count: 0, deduct_block: System::block_number(), code_hash: H256::repeat_byte(1), rent_allowance: 40, + rent_payed: 0, last_write: None, }); let _ = Balances::deposit_creating(&ALICE, 110); @@ -343,11 +397,11 @@ fn account_removal_does_not_remove_storage() { let bob_contract_info = ContractInfo::Alive(RawAliveContractInfo { trie_id: trie_id2.clone(), storage_size: 0, - empty_pair_count: 0, - total_pair_count: 0, + pair_count: 0, deduct_block: System::block_number(), code_hash: H256::repeat_byte(2), rent_allowance: 40, + rent_payed: 0, last_write: None, }); let _ = Balances::deposit_creating(&BOB, 110); @@ -397,68 +451,72 @@ fn instantiate_and_call_and_deposit_event() { .build() .execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let subsistence = super::Config::::subsistence_threshold_uncached(); - - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); + let subsistence = Module::::subsistence_threshold(); // Check at the end to get hash on error easily - let creation = Contracts::instantiate( + let creation = Contracts::instantiate_with_code( Origin::signed(ALICE), - subsistence, + subsistence * 100, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], ); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - pretty_assertions::assert_eq!(System::events(), vec![ + assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + event: Event::frame_system(frame_system::Event::NewAccount(ALICE.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), + event: Event::pallet_balances( + pallet_balances::Event::Endowed(ALICE, 1_000_000) + ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), + event: Event::frame_system(frame_system::Event::NewAccount(addr.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), + event: Event::pallet_balances( + pallet_balances::Event::Endowed(addr.clone(), subsistence * 100) + ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances( - 
pallet_balances::RawEvent::Endowed(BOB, subsistence) + event: Event::pallet_balances( + pallet_balances::Event::Transfer(ALICE, addr.clone(), subsistence * 100) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Transfer(ALICE, BOB, subsistence) - ), + event: Event::pallet_contracts(RawEvent::CodeStored(code_hash.into())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::ContractExecution(BOB, vec![1, 2, 3, 4])), + event: Event::pallet_contracts( + RawEvent::ContractEmitted(addr.clone(), vec![1, 2, 3, 4]) + ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), + event: Event::pallet_contracts(RawEvent::Instantiated(ALICE, addr.clone())), topics: vec![], - } + }, ]); assert_ok!(creation); - assert!(ContractInfoOf::::contains_key(BOB)); + assert!(ContractInfoOf::::contains_key(&addr)); }); } @@ -472,36 +530,33 @@ fn deposit_event_max_value_limit() { .execute_with(|| { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), 30_000, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], )); - - // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, >::max_value()); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Call contract with allowed storage value. assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT * 2, // we are copying a huge buffer, - ::MaxValueSize::get().encode(), + ::MaxValueSize::get().encode(), )); // Call contract with too large a storage value. assert_err_ignore_postinfo!( Contracts::call( Origin::signed(ALICE), - BOB, + addr, 0, GAS_LIMIT, - (::MaxValueSize::get() + 1).encode(), + (::MaxValueSize::get() + 1).encode(), ), Error::::ValueTooLarge, ); @@ -511,6 +566,7 @@ fn deposit_event_max_value_limit() { #[test] fn run_out_of_gas() { let (wasm, code_hash) = compile_module::("run_out_of_gas").unwrap(); + let subsistence = Module::::subsistence_threshold(); ExtBuilder::default() .existential_deposit(50) @@ -518,22 +574,22 @@ fn run_out_of_gas() { .execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), - 100, + 100 * subsistence, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Call the contract with a fixed gas limit. It must run out of gas because it just // loops forever. assert_err_ignore_postinfo!( Contracts::call( Origin::signed(ALICE), - BOB, // newly created account + addr, // newly created account 0, 67_500_000, vec![], @@ -545,50 +601,14 @@ fn run_out_of_gas() { /// Input data for each call in set_rent code mod call { - pub fn set_storage_4_byte() -> Vec { vec![] } - pub fn remove_storage_4_byte() -> Vec { vec![0] } - pub fn transfer() -> Vec { vec![0, 0] } - pub fn null() -> Vec { vec![0, 0, 0] } -} - -/// Test correspondence of set_rent code and its hash. 
-/// Also test that encoded extrinsic in code correspond to the correct transfer -#[test] -fn test_set_rent_code_and_hash() { - // This test can fail due to the encoding changes. In case it becomes too annoying - // let's rewrite so as we use this module controlled call or we serialize it in runtime. - let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer(CHARLIE, 50))); - assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - - let (wasm, code_hash) = compile_module::("set_rent").unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - - // If you ever need to update the wasm source this test will fail - // and will show you the actual hash. - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - ]); - }); + use super::{AccountIdOf, Test}; + pub fn set_storage_4_byte() -> Vec { 0u32.to_le_bytes().to_vec() } + pub fn remove_storage_4_byte() -> Vec { 1u32.to_le_bytes().to_vec() } + #[allow(dead_code)] + pub fn transfer(to: &AccountIdOf) -> Vec { + 2u32.to_le_bytes().iter().chain(AsRef::<[u8]>::as_ref(to)).cloned().collect() + } + pub fn null() -> Vec { 3u32.to_le_bytes().to_vec() } } #[test] @@ -602,15 +622,17 @@ fn storage_size() { .execute_with(|| { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), 30_000, GAS_LIMIT, - code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + wasm, + // rent_allowance + ::Balance::from(10_000u32).encode(), + vec![], )); - let bob_contract = ContractInfoOf::::get(BOB) + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let bob_contract = ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap(); @@ -619,22 +641,18 @@ fn storage_size() { 4 ); assert_eq!( - bob_contract.total_pair_count, + bob_contract.pair_count, 1, ); - assert_eq!( - bob_contract.empty_pair_count, - 0, - ); assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT, call::set_storage_4_byte() )); - let bob_contract = ContractInfoOf::::get(BOB) + let bob_contract = ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap(); @@ -643,22 +661,18 @@ fn storage_size() { 4 + 4 ); assert_eq!( - bob_contract.total_pair_count, + bob_contract.pair_count, 2, ); - assert_eq!( - bob_contract.empty_pair_count, - 0, - ); assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT, call::remove_storage_4_byte() )); - let bob_contract = ContractInfoOf::::get(BOB) + let bob_contract = ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap(); @@ -667,13 +681,9 @@ fn storage_size() { 4 ); assert_eq!( - bob_contract.total_pair_count, + bob_contract.pair_count, 1, ); - assert_eq!( - bob_contract.empty_pair_count, - 0, - ); }); } @@ -685,15 +695,16 @@ fn empty_kv_pairs() { .build() .execute_with(|| { let _ = 
Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), 30_000, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], )); - let bob_contract = ContractInfoOf::::get(BOB) + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let bob_contract = ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap(); @@ -703,11 +714,7 @@ fn empty_kv_pairs() { 0, ); assert_eq!( - bob_contract.total_pair_count, - 1, - ); - assert_eq!( - bob_contract.empty_pair_count, + bob_contract.pair_count, 1, ); }); @@ -717,7 +724,6 @@ fn initialize_block(number: u64) { System::initialize( &number, &[0u8; 32].into(), - &[0u8; 32].into(), &Default::default(), Default::default(), ); @@ -726,6 +732,8 @@ fn initialize_block(number: u64) { #[test] fn deduct_blocks() { let (wasm, code_hash) = compile_module::("set_rent").unwrap(); + let endowment: BalanceOf = 100_000; + let allowance: BalanceOf = 70_000; ExtBuilder::default() .existential_deposit(50) @@ -733,95 +741,107 @@ fn deduct_blocks() { .execute_with(|| { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), - 30_000, - GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + endowment, + GAS_LIMIT, + wasm, + allowance.encode(), + vec![], )); - - // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + let code_len: BalanceOf = + PrefabWasmModule::::from_storage_noinstr(contract.code_hash) + .unwrap() + .occupied_storage() + .into(); + + // The instantiation deducted the rent for one block immediately + let rent0 = ::RentFraction::get() + // (base_deposit(8) + bytes in storage(4) + size of code) * byte_price + // + 1 storage item (10_000) - free_balance + .mul_ceil((8 + 4 + code_len) * 10_000 + 10_000 - endowment) + // blocks to rent + * 1; + assert!(rent0 > 0); + assert_eq!(contract.rent_allowance, allowance - rent0); + assert_eq!(contract.deduct_block, 1); + assert_eq!(Balances::free_balance(&addr), endowment - rent0); // Advance 4 blocks initialize_block(5); // Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); + assert_ok!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) + ); // Check result - let rent = (8 + 4 - 3) // storage size = size_offset + deploy_set_storage - deposit_offset - * 4 // rent byte price - * 4; // blocks to rent - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000 - rent); - assert_eq!(bob_contract.deduct_block, 5); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent); + let rent = ::RentFraction::get() + .mul_ceil((8 + 4 + code_len) * 10_000 + 10_000 - (endowment - rent0)) + * 4; + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + assert_eq!(contract.rent_allowance, allowance - rent0 - rent); + assert_eq!(contract.deduct_block, 5); + assert_eq!(Balances::free_balance(&addr), endowment - rent0 - rent); - // Advance 7 blocks more 
- initialize_block(12); + // Advance 2 blocks more + initialize_block(7); // Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); + assert_ok!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) + ); // Check result - let rent_2 = (8 + 4 - 2) // storage size = size_offset + deploy_set_storage - deposit_offset - * 4 // rent byte price - * 7; // blocks to rent - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000 - rent - rent_2); - assert_eq!(bob_contract.deduct_block, 12); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent - rent_2); + let rent_2 = ::RentFraction::get() + .mul_ceil((8 + 4 + code_len) * 10_000 + 10_000 - (endowment - rent0 - rent)) + * 2; + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + assert_eq!(contract.rent_allowance, allowance - rent0 - rent - rent_2); + assert_eq!(contract.deduct_block, 7); + assert_eq!(Balances::free_balance(&addr), endowment - rent0 - rent - rent_2); // Second call on same block should have no effect on rent - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); - - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000 - rent - rent_2); - assert_eq!(bob_contract.deduct_block, 12); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent - rent_2); + assert_ok!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) + ); + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + assert_eq!(contract.rent_allowance, allowance - rent0 - rent - rent_2); + assert_eq!(contract.deduct_block, 7); + assert_eq!(Balances::free_balance(&addr), endowment - rent0 - rent - rent_2) }); } -#[test] -fn call_contract_removals() { - removals(|| { - // Call on already-removed account might fail, and this is fine. 
- let _ = Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()); - true - }); -} - #[test] fn inherent_claim_surcharge_contract_removals() { - removals(|| Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok()); + removals(|addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok()); } #[test] fn signed_claim_surcharge_contract_removals() { - removals(|| Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok()); + removals(|addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok()); } #[test] fn claim_surcharge_malus() { // Test surcharge malus for inherent - claim_surcharge(4, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(3, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(2, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(1, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), false); + claim_surcharge(9, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); + claim_surcharge(8, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); + claim_surcharge(7, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); + claim_surcharge(6, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), false); // Test surcharge malus for signed - claim_surcharge(4, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), true); - claim_surcharge(3, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), false); - claim_surcharge(2, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), false); - claim_surcharge(1, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), false); + claim_surcharge(9, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), true); + claim_surcharge(8, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); + claim_surcharge(7, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); + claim_surcharge(6, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); } /// Claim surcharge with the given trigger_call at the given blocks. /// If `removes` is true then assert that the contract is a tombstone. 
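// Illustrative sketch, not part of the patch: the malus exercised by
// `claim_surcharge_malus` above. A signed claim is handicapped by
// `SignedClaimHandicap` (2 blocks in this mock), which is modelled below as
// evaluating the eviction check a couple of blocks in the past; how exactly
// the handicap enters the rent projection is simplified here.
fn claim_check_block(current_block: u64, signed: bool, handicap: u64) -> u64 {
    if signed {
        current_block.saturating_sub(handicap)
    } else {
        current_block
    }
}
// If the contract becomes evictable at block 7, an unsigned claim already
// succeeds at block 7 while a signed one only succeeds from block 9, since
// 9 - 2 = 7. That is the 7/8/9 versus 9-only pattern asserted above.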
-fn claim_surcharge(blocks: u64, trigger_call: impl Fn() -> bool, removes: bool) { +fn claim_surcharge(blocks: u64, trigger_call: impl Fn(AccountIdOf) -> bool, removes: bool) { let (wasm, code_hash) = compile_module::("set_rent").unwrap(); ExtBuilder::default() @@ -830,24 +850,26 @@ fn claim_surcharge(blocks: u64, trigger_call: impl Fn() -> bool, removes: bool) .execute_with(|| { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), - 100, - GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + 100_000, + GAS_LIMIT, + wasm, + ::Balance::from(30_000u32).encode(), // rent allowance + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Advance blocks initialize_block(blocks); // Trigger rent through call - assert!(trigger_call()); + assert_eq!(trigger_call(addr.clone()), removes); if removes { - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); } else { - assert!(ContractInfoOf::::get(BOB).unwrap().get_alive().is_some()); + assert!(ContractInfoOf::::get(&addr).unwrap().get_alive().is_some()); } }); } @@ -857,7 +879,7 @@ fn claim_surcharge(blocks: u64, trigger_call: impl Fn() -> bool, removes: bool) /// * if allowance is exceeded /// * if balance is reached and balance < subsistence threshold /// * this case cannot be triggered by a contract: we check whether a tombstone is left -fn removals(trigger_call: impl Fn() -> bool) { +fn removals(trigger_call: impl Fn(AccountIdOf) -> bool) { let (wasm, code_hash) = compile_module::("set_rent").unwrap(); // Balance reached and superior to subsistence threshold @@ -867,36 +889,44 @@ fn removals(trigger_call: impl Fn() -> bool) { .execute_with(|| { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm.clone())); - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), - 100, - GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + 70_000, + GAS_LIMIT, + wasm.clone(), + ::Balance::from(100_000u32).encode(), // rent allowance + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let allowance = ContractInfoOf::::get(&addr) + .unwrap().get_alive().unwrap().rent_allowance; + let balance = Balances::free_balance(&addr); - let subsistence_threshold = 50 /*existential_deposit*/ + 16 /*tombstone_deposit*/; + let subsistence_threshold = Module::::subsistence_threshold(); // Trigger rent must have no effect - assert!(trigger_call()); - assert_eq!(ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap().rent_allowance, 1_000); - assert_eq!(Balances::free_balance(BOB), 100); + assert!(!trigger_call(addr.clone())); + assert_eq!( + ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap().rent_allowance, + allowance, + ); + assert_eq!(Balances::free_balance(&addr), balance); // Advance blocks - initialize_block(10); + initialize_block(27); - // Trigger rent through call - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + // Trigger rent through call (should remove the contract) + assert!(trigger_call(addr.clone())); + 
assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); // Advance blocks - initialize_block(20); + initialize_block(30); // Trigger rent must have no effect - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + assert!(!trigger_call(addr.clone())); + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); }); // Allowance exceeded @@ -906,49 +936,53 @@ fn removals(trigger_call: impl Fn() -> bool) { .execute_with(|| { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm.clone())); - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), - 1_000, + 100_000, GAS_LIMIT, - code_hash.into(), - ::Balance::from(100u32).encode() // rent allowance + wasm.clone(), + ::Balance::from(70_000u32).encode(), // rent allowance + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let allowance = ContractInfoOf::::get(&addr) + .unwrap().get_alive().unwrap().rent_allowance; + let balance = Balances::free_balance(&addr); // Trigger rent must have no effect - assert!(trigger_call()); + assert!(!trigger_call(addr.clone())); assert_eq!( - ContractInfoOf::::get(BOB) + ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap() .rent_allowance, - 100 + allowance, ); - assert_eq!(Balances::free_balance(BOB), 1_000); + assert_eq!(Balances::free_balance(&addr), balance); // Advance blocks - initialize_block(10); + initialize_block(27); // Trigger rent through call - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB) + assert!(trigger_call(addr.clone())); + assert!(ContractInfoOf::::get(&addr) .unwrap() .get_tombstone() .is_some()); // Balance should be initial balance - initial rent_allowance - assert_eq!(Balances::free_balance(BOB), 900); + assert_eq!(Balances::free_balance(&addr), 30_000); // Advance blocks initialize_block(20); // Trigger rent must have no effect - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB) + assert!(!trigger_call(addr.clone())); + assert!(ContractInfoOf::::get(&addr) .unwrap() .get_tombstone() .is_some()); - assert_eq!(Balances::free_balance(BOB), 900); + assert_eq!(Balances::free_balance(&addr), 30_000); }); // Balance reached and inferior to subsistence threshold @@ -957,66 +991,55 @@ fn removals(trigger_call: impl Fn() -> bool) { .build() .execute_with(|| { // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let subsistence_threshold = - Balances::minimum_balance() + ::TombstoneDeposit::get(); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm.clone())); - assert_ok!(Contracts::instantiate( + let subsistence_threshold = Module::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, subsistence_threshold * 1000); + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), - 50 + subsistence_threshold, + subsistence_threshold * 100, GAS_LIMIT, - code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + wasm, + (subsistence_threshold * 100).encode(), // rent allowance + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let allowance = ContractInfoOf::::get(&addr) + .unwrap().get_alive().unwrap().rent_allowance; + let balance = 
Balances::free_balance(&addr); // Trigger rent must have no effect - assert!(trigger_call()); + assert!(!trigger_call(addr.clone())); assert_eq!( - ContractInfoOf::::get(BOB) + ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap() .rent_allowance, - 1_000 + allowance, ); assert_eq!( - Balances::free_balance(BOB), - 50 + subsistence_threshold, + Balances::free_balance(&addr), + balance, ); - // Transfer funds - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - call::transfer() - )); - assert_eq!( - ContractInfoOf::::get(BOB) - .unwrap() - .get_alive() - .unwrap() - .rent_allowance, - 1_000 - ); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + // Make contract have exactly the subsistence threshold + Balances::make_free_balance_be(&addr, subsistence_threshold); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); - // Advance blocks + // Advance blocks (should remove as balance is exactly subsistence) initialize_block(10); // Trigger rent through call - assert!(trigger_call()); - assert_matches!(ContractInfoOf::::get(BOB), Some(ContractInfo::Tombstone(_))); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + assert!(trigger_call(addr.clone())); + assert_matches!(ContractInfoOf::::get(&addr), Some(ContractInfo::Tombstone(_))); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); // Advance blocks initialize_block(20); // Trigger rent must have no effect - assert!(trigger_call()); - assert_matches!(ContractInfoOf::::get(BOB), Some(ContractInfo::Tombstone(_))); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + assert!(!trigger_call(addr.clone())); + assert_matches!(ContractInfoOf::::get(&addr), Some(ContractInfo::Tombstone(_))); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); }); } @@ -1031,39 +1054,42 @@ fn call_removed_contract() { .execute_with(|| { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm.clone())); - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), - 100, - GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + 30_000, + GAS_LIMIT, + wasm, + // rent allowance + ::Balance::from(10_000u32).encode(), + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Calling contract should succeed. - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); + assert_ok!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) + ); // Advance blocks - initialize_block(10); + initialize_block(27); - // Calling contract should remove contract and fail. + // Calling contract should deny access because rent cannot be paid. assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()), Error::::NotCallable ); - // Calling a contract that is about to evict shall emit an event. - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), - topics: vec![], - }, - ]); + // No event is generated because the contract is not actually removed. + assert_eq!(System::events(), vec![]); // Subsequent contract calls should also fail. 
assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()), Error::::NotCallable ); + + // A snitch can now remove the contract + assert_ok!(Contracts::claim_surcharge(Origin::none(), addr.clone(), Some(ALICE))); + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); }) } @@ -1077,151 +1103,241 @@ fn default_rent_allowance_on_instantiate() { .execute_with(|| { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), 30_000, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + let code_len: BalanceOf = + PrefabWasmModule::::from_storage_noinstr(contract.code_hash) + .unwrap() + .occupied_storage() + .into(); - // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, >::max_value()); + // The instantiation deducted the rent for one block immediately + let first_rent = ::RentFraction::get() + // (base_deposit(8) + code_len) * byte_price - free_balance + .mul_ceil((8 + code_len) * 10_000 - 30_000) + // blocks to rent + * 1; + assert_eq!(contract.rent_allowance, >::max_value() - first_rent); // Advance blocks initialize_block(5); // Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); + assert_ok!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) + ); // Check contract is still alive - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive(); - assert!(bob_contract.is_some()) + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive(); + assert!(contract.is_some()) }); } #[test] fn restorations_dirty_storage_and_different_storage() { - restoration(true, true); + restoration(true, true, false); } #[test] fn restorations_dirty_storage() { - restoration(false, true); + restoration(false, true, false); } #[test] fn restoration_different_storage() { - restoration(true, false); + restoration(true, false, false); +} + +#[test] +fn restoration_code_evicted() { + restoration(false, false, true); } #[test] fn restoration_success() { - restoration(false, false); + restoration(false, false, false); } -fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: bool) { +fn restoration( + test_different_storage: bool, + test_restore_to_with_dirty_storage: bool, + test_code_evicted: bool +) { let (set_rent_wasm, set_rent_code_hash) = compile_module::("set_rent").unwrap(); let (restoration_wasm, restoration_code_hash) = compile_module::("restoration").unwrap(); + let allowance: ::Balance = 10_000; ExtBuilder::default() .existential_deposit(50) .build() .execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), restoration_wasm)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), set_rent_wasm)); - // If you ever need to update the wasm source this test will fail - // and will show you the actual hash. - assert_eq!(System::events(), vec![ + // Create an account with address `BOB` with code `CODE_SET_RENT`. 
+ // The input parameter sets the rent allowance to 0. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, + GAS_LIMIT, + set_rent_wasm.clone(), + allowance.encode(), + vec![], + )); + let addr_bob = Contracts::contract_address(&ALICE, &set_rent_code_hash, &[]); + + let mut events = vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + event: Event::frame_system(frame_system::Event::NewAccount(ALICE)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), + event: Event::pallet_balances( + pallet_balances::Event::Endowed(ALICE, 1_000_000) + ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(restoration_code_hash.into())), + event: Event::frame_system(frame_system::Event::NewAccount(addr_bob.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(set_rent_code_hash.into())), + event: Event::pallet_balances( + pallet_balances::Event::Endowed(addr_bob.clone(), 30_000) + ), topics: vec![], }, - ]); + EventRecord { + phase: Phase::Initialization, + event: Event::pallet_balances( + pallet_balances::Event::Transfer(ALICE, addr_bob.clone(), 30_000) + ), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::pallet_contracts(RawEvent::CodeStored(set_rent_code_hash.into())), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::pallet_contracts(RawEvent::Instantiated(ALICE, addr_bob.clone())), + topics: vec![], + }, + ]; - // Create an account with address `BOB` with code `CODE_SET_RENT`. - // The input parameter sets the rent allowance to 0. - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - set_rent_code_hash.into(), - ::Balance::from(0u32).encode() - )); + // Create another contract from the same code in order to increment the codes + // refcounter so that it stays on chain. + if !test_code_evicted { + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 20_000, + GAS_LIMIT, + set_rent_wasm, + allowance.encode(), + vec![1], + )); + assert_refcount!(set_rent_code_hash, 2); + let addr_dummy = Contracts::contract_address(&ALICE, &set_rent_code_hash, &[1]); + events.extend([ + EventRecord { + phase: Phase::Initialization, + event: Event::frame_system(frame_system::Event::NewAccount(addr_dummy.clone())), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::pallet_balances( + pallet_balances::Event::Endowed(addr_dummy.clone(), 20_000) + ), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::pallet_balances( + pallet_balances::Event::Transfer(ALICE, addr_dummy.clone(), 20_000) + ), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::pallet_contracts(RawEvent::Instantiated(ALICE, addr_dummy.clone())), + topics: vec![], + }, + ].iter().cloned()); + } + + assert_eq!(System::events(), events); - // Check if `BOB` was created successfully and that the rent allowance is - // set to 0. - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 0); + // Check if `BOB` was created successfully and that the rent allowance is below what + // we specified as the first rent was already collected. 
+ let bob_contract = ContractInfoOf::::get(&addr_bob).unwrap().get_alive().unwrap(); + assert!(bob_contract.rent_allowance < allowance); if test_different_storage { assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, 0, GAS_LIMIT, + addr_bob.clone(), 0, GAS_LIMIT, call::set_storage_4_byte()) ); } - // Advance 4 blocks, to the 5th. - initialize_block(5); + // Advance blocks in order to make the contract run out of money for rent. + initialize_block(27); - // Call `BOB`, which makes it pay rent. Since the rent allowance is set to 0 - // we expect that it will get removed leaving tombstone. + // Call `BOB`, which makes it pay rent. Since the rent allowance is set to 20_000 + // we expect that it is no longer callable but keeps existing until someone + // calls `claim_surcharge`. assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), + Contracts::call( + Origin::signed(ALICE), addr_bob.clone(), 0, GAS_LIMIT, call::null() + ), Error::::NotCallable ); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts( - RawEvent::Evicted(BOB.clone(), true) - ), - topics: vec![], - }, - ]); + assert!(System::events().is_empty()); + assert!(ContractInfoOf::::get(&addr_bob).unwrap().get_alive().is_some()); + assert_ok!(Contracts::claim_surcharge(Origin::none(), addr_bob.clone(), Some(ALICE))); + assert!(ContractInfoOf::::get(&addr_bob).unwrap().get_tombstone().is_some()); + if test_code_evicted { + assert_refcount!(set_rent_code_hash, 0); + } else { + assert_refcount!(set_rent_code_hash, 1); + } // Create another account with the address `DJANGO` with `CODE_RESTORATION`. // // Note that we can't use `ALICE` for creating `DJANGO` so we create yet another // account `CHARLIE` and create `DJANGO` with it. let _ = Balances::deposit_creating(&CHARLIE, 1_000_000); - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(CHARLIE), 30_000, GAS_LIMIT, - restoration_code_hash.into(), - ::Balance::from(0u32).encode() + restoration_wasm, + vec![], + vec![], )); + let addr_django = Contracts::contract_address(&CHARLIE, &restoration_code_hash, &[]); // Before performing a call to `DJANGO` save its original trie id. - let django_trie_id = ContractInfoOf::::get(DJANGO).unwrap() + let django_trie_id = ContractInfoOf::::get(&addr_django).unwrap() .get_alive().unwrap().trie_id; + // The trie is regarded as 'dirty' when it was written to in the current block. if !test_restore_to_with_dirty_storage { - // Advance 1 block, to the 6th. - initialize_block(6); + // Advance 1 block. + initialize_block(28); } // Perform a call to `DJANGO`. This should either perform restoration successfully or @@ -1229,98 +1345,140 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: let perform_the_restoration = || { Contracts::call( Origin::signed(ALICE), - DJANGO, + addr_django.clone(), 0, GAS_LIMIT, - set_rent_code_hash.as_ref().to_vec(), + set_rent_code_hash + .as_ref() + .iter() + .chain(AsRef::<[u8]>::as_ref(&addr_bob)) + .cloned() + .collect(), ) }; - if test_different_storage || test_restore_to_with_dirty_storage { + // The key that is used in the restorer contract but is not in the target contract. + // Is supplied as delta to the restoration. We need it to check whether the key + // is properly removed on success but still there on failure. 
+ let delta_key = { + let mut key = [0u8; 32]; + key[0] = 1; + key + }; + + if test_different_storage || test_restore_to_with_dirty_storage || test_code_evicted { // Parametrization of the test imply restoration failure. Check that `DJANGO` aka // restoration contract is still in place and also that `BOB` doesn't exist. - - assert_err_ignore_postinfo!( - perform_the_restoration(), - Error::::ContractTrapped, - ); - - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - let django_contract = ContractInfoOf::::get(DJANGO).unwrap() + let result = perform_the_restoration(); + assert!(ContractInfoOf::::get(&addr_bob).unwrap().get_tombstone().is_some()); + let django_contract = ContractInfoOf::::get(&addr_django).unwrap() .get_alive().unwrap(); assert_eq!(django_contract.storage_size, 8); assert_eq!(django_contract.trie_id, django_trie_id); assert_eq!(django_contract.deduct_block, System::block_number()); - match (test_different_storage, test_restore_to_with_dirty_storage) { - (true, false) => { + assert_eq!( + Storage::::read(&django_trie_id, &delta_key), + Some(vec![40, 0, 0, 0]), + ); + match ( + test_different_storage, + test_restore_to_with_dirty_storage, + test_code_evicted + ) { + (true, false, false) => { + assert_err_ignore_postinfo!( + result, Error::::InvalidTombstone, + ); assert_eq!(System::events(), vec![]); } - (_, true) => { - pretty_assertions::assert_eq!(System::events(), vec![ + (_, true, false) => { + assert_err_ignore_postinfo!( + result, Error::::InvalidContractOrigin, + ); + assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), + event: Event::pallet_contracts(RawEvent::Evicted(addr_bob)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(CHARLIE)), + event: Event::frame_system(frame_system::Event::NewAccount(CHARLIE)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(CHARLIE, 1_000_000)), + event: Event::pallet_balances(pallet_balances::Event::Endowed(CHARLIE, 1_000_000)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(DJANGO)), + event: Event::frame_system(frame_system::Event::NewAccount(addr_django.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(DJANGO, 30_000)), + event: Event::pallet_balances(pallet_balances::Event::Endowed(addr_django.clone(), 30_000)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Transfer(CHARLIE, DJANGO, 30_000) + event: Event::pallet_balances( + pallet_balances::Event::Transfer(CHARLIE, addr_django.clone(), 30_000) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(CHARLIE, DJANGO)), + event: Event::pallet_contracts(RawEvent::CodeStored(restoration_code_hash)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::pallet_contracts(RawEvent::Instantiated(CHARLIE, addr_django.clone())), topics: vec![], }, + ]); - } + }, + (false, false, true) => { + assert_err_ignore_postinfo!( + result, Error::::CodeNotFound, + ); + assert_refcount!(set_rent_code_hash, 0); + assert_eq!(System::events(), vec![]); + }, _ => unreachable!(), } } else { 
assert_ok!(perform_the_restoration()); + assert_refcount!(set_rent_code_hash, 2); // Here we expect that the restoration is succeeded. Check that the restoration // contract `DJANGO` ceased to exist and that `BOB` returned back. - println!("{:?}", ContractInfoOf::::get(BOB)); - let bob_contract = ContractInfoOf::::get(BOB).unwrap() + let bob_contract = ContractInfoOf::::get(&addr_bob).unwrap() .get_alive().unwrap(); assert_eq!(bob_contract.rent_allowance, 50); assert_eq!(bob_contract.storage_size, 4); assert_eq!(bob_contract.trie_id, django_trie_id); assert_eq!(bob_contract.deduct_block, System::block_number()); - assert!(ContractInfoOf::::get(DJANGO).is_none()); + assert!(ContractInfoOf::::get(&addr_django).is_none()); + assert_matches!(Storage::::read(&django_trie_id, &delta_key), None); assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(system::RawEvent::KilledAccount(DJANGO)), + event: Event::pallet_contracts(RawEvent::CodeRemoved(restoration_code_hash)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::frame_system(system::Event::KilledAccount(addr_django.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts( - RawEvent::Restored(DJANGO, BOB, bob_contract.code_hash, 50) + event: Event::pallet_contracts( + RawEvent::Restored(addr_django, addr_bob, bob_contract.code_hash, 50) ), topics: vec![], }, @@ -1339,36 +1497,34 @@ fn storage_max_value_limit() { .execute_with(|| { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), 30_000, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], )); - - // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, >::max_value()); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); // Call contract with allowed storage value. assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT * 2, // we are copying a huge buffer - ::MaxValueSize::get().encode(), + ::MaxValueSize::get().encode(), )); // Call contract with too large a storage value. assert_err_ignore_postinfo!( Contracts::call( Origin::signed(ALICE), - BOB, + addr, 0, GAS_LIMIT, - (::MaxValueSize::get() + 1).encode(), + (::MaxValueSize::get() + 1).encode(), ), Error::::ValueTooLarge, ); @@ -1386,22 +1542,28 @@ fn deploy_and_call_other_contract() { .execute_with(|| { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), callee_wasm)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_wasm)); - - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), 100_000, GAS_LIMIT, - caller_code_hash.into(), + caller_wasm, + vec![], vec![], )); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + callee_wasm, + 0u32.to_le_bytes().encode(), + vec![42], + )); // Call BOB contract, which attempts to instantiate and call the callee contract and // makes various assertions on the results from those calls. 
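// The caller below is addressed through `Contracts::contract_address` instead of a
// fixed test identity: the address is derived deterministically from the deployer,
// the code hash and the instantiation salt. A minimal sketch of that idea (the
// pallet defines the exact input encoding and hash function; this helper is only
// meant to show why different salts give different addresses for the same code
// and deployer):
fn illustrative_contract_address(deployer: &[u8], code_hash: &[u8], salt: &[u8]) -> [u8; 32] {
	use sp_io::hashing::blake2_256;
	let buf: Vec<u8> = deployer.iter().chain(code_hash).chain(salt).cloned().collect();
	blake2_256(&buf)
}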
assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + Contracts::contract_address(&ALICE, &caller_code_hash, &[]), 0, GAS_LIMIT, callee_code_hash.as_ref().to_vec(), @@ -1417,20 +1579,21 @@ fn cannot_self_destruct_through_draning() { .build() .execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), 100_000, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. assert_matches!( - ContractInfoOf::::get(BOB), + ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_)) ); @@ -1439,7 +1602,7 @@ fn cannot_self_destruct_through_draning() { assert_ok!( Contracts::call( Origin::signed(ALICE), - BOB, + addr, 0, GAS_LIMIT, vec![], @@ -1456,20 +1619,21 @@ fn cannot_self_destruct_while_live() { .build() .execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), 100_000, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. assert_matches!( - ContractInfoOf::::get(BOB), + ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_)) ); @@ -1478,7 +1642,7 @@ fn cannot_self_destruct_while_live() { assert_err_ignore_postinfo!( Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT, vec![0], @@ -1488,7 +1652,7 @@ fn cannot_self_destruct_while_live() { // Check that BOB is still alive. assert_matches!( - ContractInfoOf::::get(BOB), + ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_)) ); }); @@ -1502,28 +1666,33 @@ fn self_destruct_works() { .build() .execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); + let _ = Balances::deposit_creating(&DJANGO, 1_000_000); // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), 100_000, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. assert_matches!( - ContractInfoOf::::get(BOB), + ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_)) ); + // Drop all previous events + initialize_block(2); + // Call BOB without input data which triggers termination. 
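// The `self_destruct` fixture appears to branch on its input: an empty buffer asks
// it to terminate (with DJANGO as the beneficiary of the remaining balance), while
// a non-empty buffer makes it re-enter itself first, which is what the
// `cannot_self_destruct_while_live` case above exploits. Sketched with stand-in
// closures for the seal host functions the wasm fixture actually uses:
fn self_destruct_fixture(input: &[u8], terminate: impl Fn(), re_enter: impl Fn()) {
	if input.is_empty() {
		// Plain termination: the free balance is paid out to the beneficiary.
		terminate();
	} else {
		// Re-entrant call: terminating now would happen while the contract is
		// still on the call stack, which the pallet rejects.
		re_enter();
	}
}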
assert_matches!( Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT, vec![], @@ -1531,11 +1700,41 @@ fn self_destruct_works() { Ok(_) ); + pretty_assertions::assert_eq!(System::events(), vec![ + EventRecord { + phase: Phase::Initialization, + event: Event::frame_system( + frame_system::Event::KilledAccount(addr.clone()) + ), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::pallet_balances( + pallet_balances::Event::Transfer(addr.clone(), DJANGO, 93_654) + ), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::pallet_contracts(RawEvent::CodeRemoved(code_hash)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::pallet_contracts( + RawEvent::Terminated(addr.clone(), DJANGO) + ), + topics: vec![], + }, + ]); + // Check that account is gone - assert!(ContractInfoOf::::get(BOB).is_none()); + assert!(ContractInfoOf::::get(&addr).is_none()); // check that the beneficiary (django) got remaining balance - assert_eq!(Balances::free_balance(DJANGO), 100_000); + // some rent was deducted before termination + assert_eq!(Balances::free_balance(DJANGO), 1_093_654); }); } @@ -1552,59 +1751,70 @@ fn destroy_contract_and_transfer_funds() { .execute_with(|| { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), callee_wasm)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_wasm)); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 200_000, + GAS_LIMIT, + callee_wasm, + vec![], + vec![42] + )); // This deploys the BOB contract, which in turn deploys the CHARLIE contract during // construction. - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), 200_000, GAS_LIMIT, - caller_code_hash.into(), + caller_wasm, callee_code_hash.as_ref().to_vec(), + vec![], )); + let addr_bob = Contracts::contract_address(&ALICE, &caller_code_hash, &[]); + let addr_charlie = Contracts::contract_address( + &addr_bob, &callee_code_hash, &[0x47, 0x11] + ); // Check that the CHARLIE contract has been instantiated. assert_matches!( - ContractInfoOf::::get(CHARLIE), + ContractInfoOf::::get(&addr_charlie), Some(ContractInfo::Alive(_)) ); // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr_bob, 0, GAS_LIMIT, - CHARLIE.encode(), + addr_charlie.encode(), )); // Check that CHARLIE has moved on to the great beyond (ie. died). - assert!(ContractInfoOf::::get(CHARLIE).is_none()); + assert!(ContractInfoOf::::get(&addr_charlie).is_none()); }); } #[test] fn cannot_self_destruct_in_constructor() { - let (wasm, code_hash) = compile_module::("self_destructing_constructor").unwrap(); + let (wasm, _) = compile_module::("self_destructing_constructor").unwrap(); ExtBuilder::default() .existential_deposit(50) .build() .execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); // Fail to instantiate the BOB because the contructor calls seal_terminate. 
assert_err_ignore_postinfo!( - Contracts::instantiate( + Contracts::instantiate_with_code( Origin::signed(ALICE), 100_000, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], ), - Error::::NewContractNotFunded, + Error::::NotCallable, ); }); } @@ -1618,16 +1828,17 @@ fn crypto_hashes() { .build() .execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); // Instantiate the CRYPTO_HASHES contract. - assert_ok!(Contracts::instantiate( + assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), 100_000, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Perform the call. let input = b"_DEAD_BEEF"; use sp_io::hashing::*; @@ -1651,7 +1862,7 @@ fn crypto_hashes() { params.extend_from_slice(input); let result = >::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, params, @@ -1667,24 +1878,26 @@ fn crypto_hashes() { fn transfer_return_code() { let (wasm, code_hash) = compile_module::("transfer_return_code").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Config::::subsistence_threshold_uncached(); - let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); + let subsistence = Module::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); assert_ok!( - Contracts::instantiate( + Contracts::instantiate_with_code( Origin::signed(ALICE), - subsistence, + subsistence * 100, GAS_LIMIT, - code_hash.into(), + wasm, + vec![], vec![], ), ); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Contract has only the minimal balance so any transfer will return BelowSubsistence. + Balances::make_free_balance_be(&addr, subsistence); let result = Contracts::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, vec![], @@ -1694,11 +1907,11 @@ fn transfer_return_code() { // Contract has enough total balance in order to not go below the subsistence // threshold when transfering 100 balance but this balance is reserved so // the transfer still fails but with another return code. 
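// The two failure modes exercised here differ only in *why* the transfer cannot be
// made. A compact way to picture the distinction (illustrative, with plain u64
// values standing in for the runtime's Balance type):
fn classify_transfer(free: u64, reserved: u64, subsistence: u64, amount: u64) -> &'static str {
	let total = free + reserved;
	if total.saturating_sub(amount) < subsistence {
		// The transfer would push the total balance below the subsistence threshold.
		"BelowSubsistenceThreshold"
	} else if free < amount {
		// Enough total balance, but the transferable (free) part is reserved.
		"TransferFailed"
	} else {
		"ok"
	}
}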
- Balances::make_free_balance_be(&BOB, subsistence + 100); - Balances::reserve(&BOB, subsistence + 100).unwrap(); + Balances::make_free_balance_be(&addr, subsistence + 100); + Balances::reserve(&addr, subsistence + 100).unwrap(); let result = Contracts::bare_call( ALICE, - BOB, + addr, 0, GAS_LIMIT, vec![], @@ -1712,84 +1925,88 @@ fn call_return_code() { let (caller_code, caller_hash) = compile_module::("call_return_code").unwrap(); let (callee_code, callee_hash) = compile_module::("ok_trap_revert").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Config::::subsistence_threshold_uncached(); - let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); - let _ = Balances::deposit_creating(&CHARLIE, 10 * subsistence); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_code)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), callee_code)); + let subsistence = Module::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); assert_ok!( - Contracts::instantiate( + Contracts::instantiate_with_code( Origin::signed(ALICE), - subsistence, + subsistence * 100, GAS_LIMIT, - caller_hash.into(), + caller_code, vec![0], + vec![], ), ); + let addr_bob = Contracts::contract_address(&ALICE, &caller_hash, &[]); + Balances::make_free_balance_be(&addr_bob, subsistence); // Contract calls into Django which is no valid contract let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![0], + AsRef::<[u8]>::as_ref(&DJANGO).to_vec(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::NotCallable); assert_ok!( - Contracts::instantiate( + Contracts::instantiate_with_code( Origin::signed(CHARLIE), - subsistence, + subsistence * 100, GAS_LIMIT, - callee_hash.into(), + callee_code, vec![0], + vec![], ), ); + let addr_django = Contracts::contract_address(&CHARLIE, &callee_hash, &[]); + Balances::make_free_balance_be(&addr_django, subsistence); // Contract has only the minimal balance so any transfer will return BelowSubsistence. let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![0], + AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&0u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go below the subsistence // threshold when transfering 100 balance but this balance is reserved so // the transfer still fails but with another return code. - Balances::make_free_balance_be(&BOB, subsistence + 100); - Balances::reserve(&BOB, subsistence + 100).unwrap(); + Balances::make_free_balance_be(&addr_bob, subsistence + 100); + Balances::reserve(&addr_bob, subsistence + 100).unwrap(); let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![0], + AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&0u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but callee reverts because "1" is passed. 
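// The callee used throughout these return-code tests is the `ok_trap_revert`
// fixture which, judging by the assertions, is driven by a single integer:
// 0 succeeds, 1 reverts and 2 traps, and the caller surfaces that as the matching
// `RuntimeReturnCode`. Roughly:
fn ok_trap_revert_outcome(selector: u32) -> &'static str {
	match selector {
		0 => "success",
		1 => "CalleeReverted", // output is returned but state changes are rolled back
		_ => "CalleeTrapped",  // execution aborts, nothing is returned
	}
}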
- Balances::make_free_balance_be(&BOB, subsistence + 1000); + Balances::make_free_balance_be(&addr_bob, subsistence + 1000); let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![1], + AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&1u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. let result = Contracts::bare_call( ALICE, - BOB, + addr_bob, 0, GAS_LIMIT, - vec![2], + AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&2u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); @@ -1801,52 +2018,64 @@ fn instantiate_return_code() { let (caller_code, caller_hash) = compile_module::("instantiate_return_code").unwrap(); let (callee_code, callee_hash) = compile_module::("ok_trap_revert").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Config::::subsistence_threshold_uncached(); - let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); - let _ = Balances::deposit_creating(&CHARLIE, 10 * subsistence); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_code)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), callee_code)); + let subsistence = Module::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); let callee_hash = callee_hash.as_ref().to_vec(); assert_ok!( - Contracts::instantiate( + Contracts::instantiate_with_code( Origin::signed(ALICE), - subsistence, + subsistence * 100, GAS_LIMIT, - caller_hash.into(), + callee_code, + vec![], vec![], ), ); + assert_ok!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + caller_code, + vec![], + vec![], + ), + ); + let addr = Contracts::contract_address(&ALICE, &caller_hash, &[]); + // Contract has only the minimal balance so any transfer will return BelowSubsistence. + Balances::make_free_balance_be(&addr, subsistence); let result = Contracts::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, - vec![0; 33], + callee_hash.clone(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go below the subsistence - // threshold when transfering 100 balance but this balance is reserved so + // threshold when transfering the balance but this balance is reserved so // the transfer still fails but with another return code. - Balances::make_free_balance_be(&BOB, subsistence + 100); - Balances::reserve(&BOB, subsistence + 100).unwrap(); + Balances::make_free_balance_be(&addr, subsistence + 10_000); + Balances::reserve(&addr, subsistence + 10_000).unwrap(); let result = Contracts::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, - vec![0; 33], + callee_hash.clone(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but the passed code hash is invalid - Balances::make_free_balance_be(&BOB, subsistence + 1000); + Balances::make_free_balance_be(&addr, subsistence + 10_000); let result = Contracts::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, vec![0; 33], @@ -1856,22 +2085,635 @@ fn instantiate_return_code() { // Contract has enough balance but callee reverts because "1" is passed. 
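// Both return-code fixtures take their parameters as one flat byte buffer:
// `call_return_code` gets the callee address followed by a little-endian u32, and
// `instantiate_return_code` gets the callee code hash followed by such a u32 in
// the revert/trap cases; the trailing integer is what ends up in `ok_trap_revert`.
// The chained iterators used below build exactly this layout; an equivalent but
// more explicit construction would be:
fn encode_fixture_input(prefix: &[u8], selector: u32) -> Vec<u8> {
	let mut input = prefix.to_vec();                   // address or code hash bytes
	input.extend_from_slice(&selector.to_le_bytes());  // trailing selector
	input
}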
let result = Contracts::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, - callee_hash.iter().cloned().chain(sp_std::iter::once(1)).collect(), + callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. let result = Contracts::bare_call( ALICE, - BOB, + addr, 0, GAS_LIMIT, - callee_hash.iter().cloned().chain(sp_std::iter::once(2)).collect(), + callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); }); } + +#[test] +fn disabled_chain_extension_wont_deploy() { + let (code, _hash) = compile_module::("chain_extension").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Module::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + TestExtension::disable(); + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + 3 * subsistence, + GAS_LIMIT, + code, + vec![], + vec![], + ), + "module uses chain extensions but chain extensions are disabled", + ); + }); +} + +#[test] +fn disabled_chain_extension_errors_on_call() { + let (code, hash) = compile_module::("chain_extension").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Module::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + assert_ok!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ), + ); + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + TestExtension::disable(); + assert_err_ignore_postinfo!( + Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + vec![], + ), + Error::::NoChainExtension, + ); + }); +} + +#[test] +fn chain_extension_works() { + let (code, hash) = compile_module::("chain_extension").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Module::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + assert_ok!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ), + ); + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + + // The contract takes a up to 2 byte buffer where the first byte passed is used as + // as func_id to the chain extension which behaves differently based on the + // func_id. 
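// A chain extension exposes runtime-defined functionality that is not part of the
// standard seal API; the `TestExtension` used here simply dispatches on that first
// byte. A trimmed-down sketch of such a dispatcher (the real implementation goes
// through the `ChainExtension` trait and its environment; the free-function
// signature below is illustrative, not the trait's):
fn test_extension_dispatch(func_id: u32, input: &[u8]) -> Result<Vec<u8>, &'static str> {
	match func_id {
		0 => Ok(input.to_vec()),   // echo the input back as the output buffer
		1 => Ok(Vec::new()),       // decode the input as integers and record them
		2 => Ok(Vec::new()),       // charge extra weight taken from the input
		3 => Ok(vec![42, 99]),     // divert: set the revert flag and return a fixed buffer
		_ => Err("unknown func_id"),
	}
}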
+ + // 0 = read input buffer and pass it through as output + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + vec![0, 99], + ); + let gas_consumed = result.gas_consumed; + assert_eq!(TestExtension::last_seen_buffer(), vec![0, 99]); + assert_eq!(result.exec_result.unwrap().data, vec![0, 99]); + + // 1 = treat inputs as integer primitives and store the supplied integers + Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + vec![1], + ).exec_result.unwrap(); + // those are the values passed in the fixture + assert_eq!(TestExtension::last_seen_inputs(), (4, 1, 16, 12)); + + // 2 = charge some extra weight (amount supplied in second byte) + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + vec![2, 42], + ); + assert_ok!(result.exec_result); + assert_eq!(result.gas_consumed, gas_consumed + 42); + + // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + vec![3], + ).exec_result.unwrap(); + assert_eq!(result.flags, ReturnFlags::REVERT); + assert_eq!(result.data, vec![42, 99]); + }); +} + +#[test] +fn lazy_removal_works() { + let (code, hash) = compile_module::<Test>("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Module::<Test>::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + + assert_ok!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ), + ); + + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + let info = <ContractInfoOf<Test>>::get(&addr).unwrap().get_alive().unwrap(); + let trie = &info.child_trie_info(); + + // Put value into the contract's child trie + child::put(trie, &[99], &42); + + // Terminate the contract + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + vec![], + )); + + // Contract info should be gone + assert!(!<ContractInfoOf<Test>>::contains_key(&addr)); + + // But value should be still there as the lazy removal did not run, yet.
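// Termination no longer clears the child trie synchronously: only the contract
// info is removed and the trie is queued for lazy deletion, which `on_initialize`
// then drains within a weight budget. A minimal sketch of that draining loop,
// assuming a simple Vec-backed queue and a fixed per-key weight (both are
// simplifications of the pallet's storage-based queue and benchmarked weights):
fn drain_deletion_queue(queue: &mut Vec<Vec<[u8; 32]>>, weight_limit: u64, weight_per_key: u64) -> u64 {
	let mut used = 0;
	while let Some(mut trie_keys) = queue.pop() {
		while let Some(_key) = trie_keys.pop() {
			if used + weight_per_key > weight_limit {
				// Out of budget: requeue the partially deleted trie and report the
				// whole limit as consumed, since the queue was not emptied.
				queue.push(trie_keys);
				return weight_limit;
			}
			// ...remove `_key` from the child trie here...
			used += weight_per_key;
		}
	}
	used
}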
+ assert_matches!(child::get(trie, &[99]), Some(42)); + + // Run the lazy removal + Contracts::on_initialize(Weight::max_value()); + + // Value should be gone now + assert_matches!(child::get::(trie, &[99]), None); + }); +} + +#[test] +fn lazy_removal_partial_remove_works() { + let (code, hash) = compile_module::("self_destruct").unwrap(); + + // We create a contract with some extra keys above the weight limit + let extra_keys = 7u32; + let weight_limit = 5_000_000_000; + let (_, max_keys) = Storage::::deletion_budget(1, weight_limit); + let vals: Vec<_> = (0..max_keys + extra_keys).map(|i| { + (blake2_256(&i.encode()), (i as u32), (i as u32).encode()) + }) + .collect(); + + let mut ext = ExtBuilder::default().existential_deposit(50).build(); + + let trie = ext.execute_with(|| { + let subsistence = Module::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + + assert_ok!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ), + ); + + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + let info = >::get(&addr).unwrap().get_alive().unwrap(); + let trie = &info.child_trie_info(); + + // Put value into the contracts child trie + for val in &vals { + Storage::::write( + &addr, + &info.trie_id, + &val.0, + Some(val.2.clone()), + ).unwrap(); + } + + // Terminate the contract + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + vec![], + )); + + // Contract info should be gone + assert!(!>::contains_key(&addr)); + + // But value should be still there as the lazy removal did not run, yet. + for val in &vals { + assert_eq!(child::get::(trie, &blake2_256(&val.0)), Some(val.1)); + } + + trie.clone() + }); + + // The lazy removal limit only applies to the backend but not to the overlay. + // This commits all keys from the overlay to the backend. + ext.commit_all().unwrap(); + + ext.execute_with(|| { + // Run the lazy removal + let weight_used = Storage::::process_deletion_queue_batch(weight_limit); + + // Weight should be exhausted because we could not even delete all keys + assert_eq!(weight_used, weight_limit); + + let mut num_deleted = 0u32; + let mut num_remaining = 0u32; + + for val in &vals { + match child::get::(&trie, &blake2_256(&val.0)) { + None => num_deleted += 1, + Some(x) if x == val.1 => num_remaining += 1, + Some(_) => panic!("Unexpected value in contract storage"), + } + } + + // All but one key is removed + assert_eq!(num_deleted + num_remaining, vals.len() as u32); + assert_eq!(num_deleted, max_keys); + assert_eq!(num_remaining, extra_keys); + }); +} + +#[test] +fn lazy_removal_does_no_run_on_full_block() { + let (code, hash) = compile_module::("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Module::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + + assert_ok!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ), + ); + + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + let info = >::get(&addr).unwrap().get_alive().unwrap(); + let trie = &info.child_trie_info(); + let max_keys = 30; + + // Create some storage items for the contract. 
+ let vals: Vec<_> = (0..max_keys).map(|i| { + (blake2_256(&i.encode()), (i as u32), (i as u32).encode()) + }) + .collect(); + + // Put value into the contracts child trie + for val in &vals { + Storage::::write( + &addr, + &info.trie_id, + &val.0, + Some(val.2.clone()), + ).unwrap(); + } + + // Terminate the contract + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + vec![], + )); + + // Contract info should be gone + assert!(!>::contains_key(&addr)); + + // But value should be still there as the lazy removal did not run, yet. + for val in &vals { + assert_eq!(child::get::(trie, &blake2_256(&val.0)), Some(val.1)); + } + + // Fill up the block which should prevent the lazy storage removal from running. + System::register_extra_weight_unchecked( + ::BlockWeights::get().max_block, + DispatchClass::Mandatory, + ); + + // Run the lazy removal without any limit so that all keys would be removed if there + // had been some weight left in the block. + let weight_used = Contracts::on_initialize(Weight::max_value()); + let base = <::WeightInfo as crate::WeightInfo>::on_initialize(); + assert_eq!(weight_used, base); + + // All the keys are still in place + for val in &vals { + assert_eq!(child::get::(trie, &blake2_256(&val.0)), Some(val.1)); + } + + // Run the lazy removal directly which disregards the block limits + Storage::::process_deletion_queue_batch(Weight::max_value()); + + // Now the keys should be gone + for val in &vals { + assert_eq!(child::get::(trie, &blake2_256(&val.0)), None); + } + }); +} + + +#[test] +fn lazy_removal_does_not_use_all_weight() { + let (code, hash) = compile_module::("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Module::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + + assert_ok!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ), + ); + + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + let info = >::get(&addr).unwrap().get_alive().unwrap(); + let trie = &info.child_trie_info(); + let weight_limit = 5_000_000_000; + let (weight_per_key, max_keys) = Storage::::deletion_budget(1, weight_limit); + + // We create a contract with one less storage item than we can remove within the limit + let vals: Vec<_> = (0..max_keys - 1).map(|i| { + (blake2_256(&i.encode()), (i as u32), (i as u32).encode()) + }) + .collect(); + + // Put value into the contracts child trie + for val in &vals { + Storage::::write( + &addr, + &info.trie_id, + &val.0, + Some(val.2.clone()), + ).unwrap(); + } + + // Terminate the contract + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + vec![], + )); + + // Contract info should be gone + assert!(!>::contains_key(&addr)); + + // But value should be still there as the lazy removal did not run, yet. 
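// The values written above survive the terminating call because their removal is
// deferred and metered: `deletion_budget` splits a weight limit into a per-key
// cost and the maximum number of keys that fit into it, which is what the
// `weight_used` check further down relies on. Sketched with stand-in parameters
// (the real base and per-key weights come from the benchmarked `WeightInfo`, and
// the actual budget also accounts for the queue depth):
fn illustrative_deletion_budget(weight_limit: u64, base_weight: u64, weight_per_key: u64) -> (u64, u64) {
	// Assumes a non-zero per-key weight; `max(1)` only guards the sketch against division by zero.
	let max_keys = weight_limit.saturating_sub(base_weight) / weight_per_key.max(1);
	(weight_per_key, max_keys)
}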
+ for val in &vals { + assert_eq!(child::get::(trie, &blake2_256(&val.0)), Some(val.1)); + } + + // Run the lazy removal + let weight_used = Storage::::process_deletion_queue_batch(weight_limit); + + // We have one less key in our trie than our weight limit suffices for + assert_eq!(weight_used, weight_limit - weight_per_key); + + // All the keys are removed + for val in vals { + assert_eq!(child::get::(trie, &blake2_256(&val.0)), None); + } + }); +} + +#[test] +fn deletion_queue_full() { + let (code, hash) = compile_module::("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Module::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + + assert_ok!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ), + ); + + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + + // fill the deletion queue up until its limit + Storage::::fill_queue_with_dummies(); + + // Terminate the contract should fail + assert_err_ignore_postinfo!( + Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + vec![], + ), + Error::::DeletionQueueFull, + ); + + // Contract should be alive because removal failed + >::get(&addr).unwrap().get_alive().unwrap(); + + // make the contract ripe for eviction + initialize_block(5); + + // eviction should fail for the same reason as termination + assert_err!( + Contracts::claim_surcharge(Origin::none(), addr.clone(), Some(ALICE)), + Error::::DeletionQueueFull, + ); + + // Contract should be alive because removal failed + >::get(&addr).unwrap().get_alive().unwrap(); + }); +} + +#[test] +fn not_deployed_if_endowment_too_low_for_first_rent() { + let (wasm, code_hash) = compile_module::("set_rent").unwrap(); + + // The instantiation deducted the rent for one block immediately + let first_rent = ::RentFraction::get() + // base_deposit + deploy_set_storage (4 bytes in 1 item) - free_balance + .mul_ceil(80_000u32 + 40_000 + 10_000 - 30_000) + // blocks to rent + * 1; + + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_storage_noop!(assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, + GAS_LIMIT, + wasm, + (BalanceOf::::from(first_rent) - BalanceOf::::from(1u32)) + .encode(), // rent allowance + vec![], + ), + Error::::NewContractNotFunded, + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + assert_matches!(ContractInfoOf::::get(&addr), None); + }); +} + +#[test] +fn surcharge_reward_is_capped() { + let (wasm, code_hash) = compile_module::("set_rent").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, + GAS_LIMIT, + wasm, + >::from(10_000u32).encode(), // rent allowance + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let contract = >::get(&addr).unwrap().get_alive().unwrap(); + let balance = Balances::free_balance(&ALICE); + let reward = ::SurchargeReward::get(); + + // some rent should have payed due to instantiation + assert_ne!(contract.rent_payed, 0); + + // the reward should be parameterized sufficiently high to make this test useful + assert!(reward > contract.rent_payed); + + // make contract 
eligible for eviction + initialize_block(40); + + // this should have removed the contract + assert_ok!(Contracts::claim_surcharge(Origin::none(), addr.clone(), Some(ALICE))); + + // this reward does not take into account the last rent payment collected during eviction + let capped_reward = reward.min(contract.rent_payed); + + // this is smaller than the actual reward because it does not take into account the + // rent collected during eviction + assert!(Balances::free_balance(&ALICE) > balance + capped_reward); + + // the full reward is not payed out because of the cap introduced by rent_payed + assert!(Balances::free_balance(&ALICE) < balance + reward); + }); +} + +#[test] +fn refcounter() { + let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let subsistence = Module::::subsistence_threshold(); + + // Create two contracts with the same code and check that they do in fact share it. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + wasm.clone(), + vec![], + vec![0], + )); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + wasm.clone(), + vec![], + vec![1], + )); + assert_refcount!(code_hash, 2); + + // Sharing should also work with the usual instantiate call + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code_hash, + vec![], + vec![2], + )); + assert_refcount!(code_hash, 3); + + // addresses of all three existing contracts + let addr0 = Contracts::contract_address(&ALICE, &code_hash, &[0]); + let addr1 = Contracts::contract_address(&ALICE, &code_hash, &[1]); + let addr2 = Contracts::contract_address(&ALICE, &code_hash, &[2]); + + // Terminating one contract should decrement the refcount + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr0, + 0, + GAS_LIMIT, + vec![], + )); + assert_refcount!(code_hash, 2); + + // make remaining contracts eligible for eviction + initialize_block(40); + + // remove one of them + assert_ok!(Contracts::claim_surcharge(Origin::none(), addr1, Some(ALICE))); + assert_refcount!(code_hash, 1); + + // Pristine code should still be there + crate::PristineCode::::get(code_hash).unwrap(); + + // remove the last contract + assert_ok!(Contracts::claim_surcharge(Origin::none(), addr2, Some(ALICE))); + assert_refcount!(code_hash, 0); + + // all code should be gone + assert_matches!(crate::PristineCode::::get(code_hash), None); + assert_matches!(crate::CodeStorage::::get(code_hash), None); + }); +} diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 34b8ea7443538..6166918c80c94 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -1,18 +1,19 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! A module that implements instrumented code cache. //! @@ -26,45 +27,84 @@ //! this guarantees that every instrumented contract code in cache cannot have the version equal to the current one. //! Thus, before executing a contract it should be reinstrument with new schedule. -use crate::wasm::{prepare, runtime::Env, PrefabWasmModule}; -use crate::{CodeHash, CodeStorage, PristineCode, Schedule, Trait}; -use sp_std::prelude::*; -use sp_runtime::traits::Hash; -use frame_support::StorageMap; +use crate::{ + CodeHash, CodeStorage, PristineCode, Schedule, Config, Error, + wasm::{prepare, PrefabWasmModule}, Module as Contracts, RawEvent, +}; +use sp_core::crypto::UncheckedFrom; +use frame_support::{StorageMap, dispatch::{DispatchError, DispatchResult}}; -/// Put code in the storage. The hash of code is used as a key and is returned -/// as a result of this function. +/// Put the instrumented module in storage. /// -/// This function instruments the given code and caches it in the storage. -pub fn save( - original_code: Vec, - schedule: &Schedule, -) -> Result, &'static str> { - let prefab_module = prepare::prepare_contract::(&original_code, schedule)?; - let code_hash = T::Hashing::hash(&original_code); - - >::insert(code_hash, prefab_module); - >::insert(code_hash, original_code); +/// Increments the refcount of the in-storage `prefab_module` if it already exists in storage +/// under the specified `code_hash`. +pub fn store(mut prefab_module: PrefabWasmModule) +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + let code_hash = sp_std::mem::take(&mut prefab_module.code_hash); - Ok(code_hash) + // original_code is only `Some` if the contract was instantiated from a new code + // but `None` if it was loaded from storage. + if let Some(code) = prefab_module.original_code.take() { + >::insert(&code_hash, code); + } + >::mutate(&code_hash, |existing| { + match existing { + Some(module) => increment_64(&mut module.refcount), + None => { + *existing = Some(prefab_module); + Contracts::::deposit_event(RawEvent::CodeStored(code_hash)) + } + } + }); } -/// Version of `save` to be used in runtime benchmarks. -// -/// This version neither checks nor instruments the passed in code. This is useful -/// when code needs to be benchmarked without the injected instrumentation. -#[cfg(feature = "runtime-benchmarks")] -pub fn save_raw( - original_code: Vec, - schedule: &Schedule, -) -> Result, &'static str> { - let prefab_module = prepare::benchmarking::prepare_contract::(&original_code, schedule)?; - let code_hash = T::Hashing::hash(&original_code); +/// Decrement the refcount and store. +/// +/// Removes the code instead of storing it when the refcount drops to zero. 
+pub fn store_decremented(mut prefab_module: PrefabWasmModule) +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + prefab_module.refcount = prefab_module.refcount.saturating_sub(1); + if prefab_module.refcount > 0 { + >::insert(prefab_module.code_hash, prefab_module); + } else { + >::remove(prefab_module.code_hash); + finish_removal::(prefab_module.code_hash); + } +} - >::insert(code_hash, prefab_module); - >::insert(code_hash, original_code); +/// Increment the refcount of a code in-storage by one. +pub fn increment_refcount(code_hash: CodeHash) -> DispatchResult +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + >::mutate(code_hash, |existing| { + if let Some(module) = existing { + increment_64(&mut module.refcount); + Ok(()) + } else { + Err(Error::::CodeNotFound.into()) + } + }) +} - Ok(code_hash) +/// Decrement the refcount of a code in-storage by one and remove the code when it drops to zero. +pub fn decrement_refcount(code_hash: CodeHash) +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + >::mutate_exists(code_hash, |existing| { + if let Some(module) = existing { + module.refcount = module.refcount.saturating_sub(1); + if module.refcount == 0 { + *existing = None; + finish_removal::(code_hash); + } + } + }); } /// Load code with the given code hash. @@ -72,22 +112,52 @@ pub fn save_raw( /// If the module was instrumented with a lower version of schedule than /// the current one given as an argument, then this function will perform /// re-instrumentation and update the cache in the storage. -pub fn load( - code_hash: &CodeHash, - schedule: &Schedule, -) -> Result { - let mut prefab_module = - >::get(code_hash).ok_or_else(|| "code is not found")?; +pub fn load( + code_hash: CodeHash, + schedule: Option<&Schedule>, +) -> Result, DispatchError> +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + let mut prefab_module = >::get(code_hash) + .ok_or_else(|| Error::::CodeNotFound)?; - if prefab_module.schedule_version < schedule.version { - // The current schedule version is greater than the version of the one cached - // in the storage. - // - // We need to re-instrument the code with the latest schedule here. - let original_code = - >::get(code_hash).ok_or_else(|| "pristine code is not found")?; - prefab_module = prepare::prepare_contract::(&original_code, schedule)?; - >::insert(&code_hash, &prefab_module); + if let Some(schedule) = schedule { + if prefab_module.schedule_version < schedule.version { + // The current schedule version is greater than the version of the one cached + // in the storage. + // + // We need to re-instrument the code with the latest schedule here. + let original_code = >::get(code_hash) + .ok_or_else(|| Error::::CodeNotFound)?; + prefab_module.code = prepare::reinstrument_contract::(original_code, schedule)?; + prefab_module.schedule_version = schedule.version; + >::insert(&code_hash, &prefab_module); + } } + prefab_module.code_hash = code_hash; Ok(prefab_module) } + +/// Finish removal of a code by deleting the pristine code and emitting an event. +fn finish_removal(code_hash: CodeHash) +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + >::remove(code_hash); + Contracts::::deposit_event(RawEvent::CodeRemoved(code_hash)) +} + +/// Increment the refcount panicking if it should ever overflow (which will not happen). +/// +/// We try hard to be infallible here because otherwise more storage transactions would be +/// necessary to account for failures in storing code for an already instantiated contract. 
+fn increment_64(refcount: &mut u64) { + *refcount = refcount.checked_add(1).expect(" + refcount is 64bit. Generating this overflow would require to store + _at least_ 18 exabyte of data assuming that a contract consumes only + one byte of data. Any node would run out of storage space before hitting + this overflow. + qed + "); +} diff --git a/frame/contracts/src/wasm/env_def/macros.rs b/frame/contracts/src/wasm/env_def/macros.rs index 2538f85fb7385..dbb6705e9722d 100644 --- a/frame/contracts/src/wasm/env_def/macros.rs +++ b/frame/contracts/src/wasm/env_def/macros.rs @@ -1,18 +1,19 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Definition of macros that hides boilerplate of defining external environment //! for a wasm module. @@ -96,7 +97,7 @@ macro_rules! unmarshall_then_body { #[inline(always)] pub fn constrain_closure(f: F) -> F where - F: FnOnce() -> Result, + F: FnOnce() -> Result, { f } @@ -109,14 +110,20 @@ macro_rules! unmarshall_then_body_then_marshall { >(|| { unmarshall_then_body!($body, $ctx, $args_iter, $( $names : $params ),*) }); - let r = body()?; + let r = body().map_err(|reason| { + $ctx.set_trap_reason(reason); + sp_sandbox::HostError + })?; return Ok(sp_sandbox::ReturnValue::Value({ use $crate::wasm::env_def::ConvertibleToWasm; r.to_typed_value() })) }); ( $args_iter:ident, $ctx:ident, ( $( $names:ident : $params:ty ),* ) => $body:tt ) => ({ let body = $crate::wasm::env_def::macros::constrain_closure::<(), _>(|| { unmarshall_then_body!($body, $ctx, $args_iter, $( $names : $params ),*) }); - body()?; + body().map_err(|reason| { + $ctx.set_trap_reason(reason); + sp_sandbox::HostError + })?; return Ok(sp_sandbox::ReturnValue::Unit) }) } @@ -127,7 +134,12 @@ macro_rules! define_func { fn $name< E: $seal_ty >( $ctx: &mut $crate::wasm::Runtime, args: &[sp_sandbox::Value], - ) -> Result { + ) -> Result + where + ::AccountId: + sp_core::crypto::UncheckedFrom<::Hash> + + AsRef<[u8]> + { #[allow(unused)] let mut args = args.iter(); @@ -183,7 +195,12 @@ macro_rules! 
define_env { } } - impl $crate::wasm::env_def::FunctionImplProvider for $init_name { + impl $crate::wasm::env_def::FunctionImplProvider for $init_name + where + ::AccountId: + sp_core::crypto::UncheckedFrom<::Hash> + + AsRef<[u8]> + { fn impls)>(f: &mut F) { register_func!(f, < E: $seal_ty > ; $( $name ( $ctx $( , $names : $params )* ) $( -> $returns)* => $body )* ); } @@ -197,15 +214,24 @@ mod tests { use parity_wasm::elements::ValueType; use sp_runtime::traits::Zero; use sp_sandbox::{ReturnValue, Value}; - use crate::wasm::tests::MockExt; - use crate::wasm::Runtime; - use crate::exec::Ext; - use crate::gas::Gas; + use crate::{ + wasm::{Runtime, runtime::TrapReason, tests::MockExt}, + exec::Ext, + gas::Gas, + }; + + struct TestRuntime { + value: u32, + } + + impl TestRuntime { + fn set_trap_reason(&mut self, _reason: TrapReason) {} + } #[test] fn macro_unmarshall_then_body_then_marshall_value_or_trap() { fn test_value( - _ctx: &mut u32, + _ctx: &mut TestRuntime, args: &[sp_sandbox::Value], ) -> Result { let mut args = args.iter(); @@ -214,7 +240,7 @@ mod tests { _ctx, (a: u32, b: u32) -> u32 => { if b == 0 { - Err(sp_sandbox::HostError) + Err(crate::wasm::runtime::TrapReason::Termination) } else { Ok(a / b) } @@ -222,7 +248,7 @@ mod tests { ) } - let ctx = &mut 0; + let ctx = &mut TestRuntime { value: 0 }; assert_eq!( test_value(ctx, &[Value::I32(15), Value::I32(3)]).unwrap(), ReturnValue::Value(Value::I32(5)), @@ -233,7 +259,7 @@ mod tests { #[test] fn macro_unmarshall_then_body_then_marshall_unit() { fn test_unit( - ctx: &mut u32, + ctx: &mut TestRuntime, args: &[sp_sandbox::Value], ) -> Result { let mut args = args.iter(); @@ -241,16 +267,16 @@ mod tests { args, ctx, (a: u32, b: u32) => { - *ctx = a + b; + ctx.value = a + b; Ok(()) } ) } - let ctx = &mut 0; + let ctx = &mut TestRuntime { value: 0 }; let result = test_unit(ctx, &[Value::I32(2), Value::I32(3)]).unwrap(); assert_eq!(result, ReturnValue::Unit); - assert_eq!(*ctx, 5); + assert_eq!(ctx.value, 5); } #[test] @@ -260,7 +286,7 @@ mod tests { if !amount.is_zero() { Ok(()) } else { - Err(sp_sandbox::HostError) + Err(TrapReason::Termination) } }); let _f: fn(&mut Runtime, &[sp_sandbox::Value]) @@ -312,7 +338,7 @@ mod tests { if !amount.is_zero() { Ok(()) } else { - Err(sp_sandbox::HostError) + Err(crate::wasm::runtime::TrapReason::Termination) } }, ); diff --git a/frame/contracts/src/wasm/env_def/mod.rs b/frame/contracts/src/wasm/env_def/mod.rs index 7b67f74ec95c8..0d9ceeee02373 100644 --- a/frame/contracts/src/wasm/env_def/mod.rs +++ b/frame/contracts/src/wasm/env_def/mod.rs @@ -1,18 +1,19 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use super::Runtime; use crate::exec::Ext; diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 2440abed8ec41..56be9f35313a6 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -1,124 +1,180 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! This module provides a means for executing contracts //! represented in wasm. -use crate::{CodeHash, Schedule, Trait}; -use crate::wasm::env_def::FunctionImplProvider; -use crate::exec::Ext; -use crate::gas::GasMeter; - -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_sandbox; - #[macro_use] mod env_def; mod code_cache; mod prepare; mod runtime; -use self::runtime::{to_execution_result, Runtime}; -use self::code_cache::load as load_code; +use crate::{ + CodeHash, Schedule, Config, + wasm::env_def::FunctionImplProvider, + exec::{Ext, Executable, ExportedFunction}, + gas::GasMeter, +}; +use sp_std::prelude::*; +use sp_core::crypto::UncheckedFrom; +use codec::{Encode, Decode}; +use frame_support::dispatch::{DispatchError, DispatchResult}; use pallet_contracts_primitives::ExecResult; - -pub use self::code_cache::save as save_code; -#[cfg(feature = "runtime-benchmarks")] -pub use self::code_cache::save_raw as save_code_raw; -pub use self::runtime::ReturnCode; +pub use self::runtime::{ReturnCode, Runtime, RuntimeToken}; /// A prepared wasm module ready for execution. +/// +/// # Note +/// +/// This data structure is mostly immutable once created and stored. The exceptions that +/// can be changed by calling a contract are `refcount`, `schedule_version` and `code`. +/// `refcount` can change when a contract instantiates a new contract or self terminates. +/// `schedule_version` and `code` when a contract with an outdated instrumention is called. 
+/// Therefore one must be careful when holding any in-memory representation of this type while +/// calling into a contract as those fields can get out of date. #[derive(Clone, Encode, Decode)] -pub struct PrefabWasmModule { +pub struct PrefabWasmModule { /// Version of the schedule with which the code was instrumented. #[codec(compact)] schedule_version: u32, + /// Initial memory size of a contract's sandbox. #[codec(compact)] initial: u32, + /// The maximum memory size of a contract's sandbox. #[codec(compact)] maximum: u32, + /// The number of alive contracts that use this as their contract code. + /// + /// If this number drops to zero this module is removed from storage. + #[codec(compact)] + refcount: u64, /// This field is reserved for future evolution of format. /// - /// Basically, for now this field will be serialized as `None`. In the future - /// we would be able to extend this structure with. + /// For now this field is serialized as `None`. In the future we are able to change the + /// type parameter to a new struct that contains the fields that we want to add. + /// That new struct would also contain a reserved field for its future extensions. + /// This works because in SCALE `None` is encoded independently from the type parameter + /// of the option. _reserved: Option<()>, /// Code instrumented with the latest schedule. code: Vec, + /// The size of the uninstrumented code. + /// + /// We cache this value here in order to avoid the need to pull the pristine code + /// from storage when we only need its length for rent calculations. + original_code_len: u32, + /// The uninstrumented, pristine version of the code. + /// + /// It is not stored because the pristine code has its own storage item. The value + /// is only `Some` when this module was created from an `original_code` and `None` if + /// it was loaded from storage. + #[codec(skip)] + original_code: Option>, + /// The code hash of the stored code which is defined as the hash over the `original_code`. + /// + /// As the map key there is no need to store the hash in the value, too. It is set manually + /// when loading the module from storage. + #[codec(skip)] + code_hash: CodeHash, } -/// Wasm executable loaded by `WasmLoader` and executed by `WasmVm`. -pub struct WasmExecutable { - entrypoint_name: &'static str, - prefab_module: PrefabWasmModule, +impl ExportedFunction { + /// The wasm export name for the function. + fn identifier(&self) -> &str { + match self { + Self::Constructor => "deploy", + Self::Call => "call", + } + } } -/// Loader which fetches `WasmExecutable` from the code cache. -pub struct WasmLoader<'a, T: Trait> { - schedule: &'a Schedule, -} +impl PrefabWasmModule +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + /// Create the module by checking and instrumenting `original_code`. + pub fn from_code( + original_code: Vec, + schedule: &Schedule + ) -> Result { + prepare::prepare_contract(original_code, schedule).map_err(Into::into) + } -impl<'a, T: Trait> WasmLoader<'a, T> { - pub fn new(schedule: &'a Schedule) -> Self { - WasmLoader { schedule } + /// Create and store the module without checking nor instrumenting the passed code. + /// + /// # Note + /// + /// This is useful for benchmarking where we don't want instrumentation to skew + /// our results. 
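// A quick check of the SCALE property that the `_reserved: Option<()>` field
// relies on: `None` encodes to the single byte 0x00 regardless of the option's
// type parameter, so the field can later be re-typed to a new struct without
// invalidating already stored modules. Assumes `parity-scale-codec` is available
// as `codec`, as elsewhere in this pallet.

use codec::Encode;

fn reserved_field_is_forward_compatible() {
    assert_eq!(Option::<()>::None.encode(), vec![0u8]);
    assert_eq!(Option::<()>::None.encode(), Option::<u32>::None.encode());
}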
+ #[cfg(feature = "runtime-benchmarks")] + pub fn store_code_unchecked( + original_code: Vec, + schedule: &Schedule + ) -> DispatchResult { + let executable = prepare::benchmarking::prepare_contract(original_code, schedule) + .map_err::(Into::into)?; + code_cache::store(executable); + Ok(()) } -} -impl<'a, T: Trait> crate::exec::Loader for WasmLoader<'a, T> { - type Executable = WasmExecutable; + /// Return the refcount of the module. + #[cfg(test)] + pub fn refcount(&self) -> u64 { + self.refcount + } +} - fn load_init(&self, code_hash: &CodeHash) -> Result { - let prefab_module = load_code::(code_hash, self.schedule)?; - Ok(WasmExecutable { - entrypoint_name: "deploy", - prefab_module, - }) +impl Executable for PrefabWasmModule +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + fn from_storage( + code_hash: CodeHash, + schedule: &Schedule + ) -> Result { + code_cache::load(code_hash, Some(schedule)) } - fn load_main(&self, code_hash: &CodeHash) -> Result { - let prefab_module = load_code::(code_hash, self.schedule)?; - Ok(WasmExecutable { - entrypoint_name: "call", - prefab_module, - }) + + fn from_storage_noinstr(code_hash: CodeHash) -> Result { + code_cache::load(code_hash, None) } -} -/// Implementation of `Vm` that takes `WasmExecutable` and executes it. -pub struct WasmVm<'a, T: Trait> { - schedule: &'a Schedule, -} + fn drop_from_storage(self) { + code_cache::store_decremented(self); + } -impl<'a, T: Trait> WasmVm<'a, T> { - pub fn new(schedule: &'a Schedule) -> Self { - WasmVm { schedule } + fn add_user(code_hash: CodeHash) -> DispatchResult { + code_cache::increment_refcount::(code_hash) } -} -impl<'a, T: Trait> crate::exec::Vm for WasmVm<'a, T> { - type Executable = WasmExecutable; + fn remove_user(code_hash: CodeHash) { + code_cache::decrement_refcount::(code_hash) + } fn execute>( - &self, - exec: &WasmExecutable, + self, mut ext: E, + function: &ExportedFunction, input_data: Vec, gas_meter: &mut GasMeter, ) -> ExecResult { let memory = - sp_sandbox::Memory::new(exec.prefab_module.initial, Some(exec.prefab_module.maximum)) + sp_sandbox::Memory::new(self.initial, Some(self.maximum)) .unwrap_or_else(|_| { // unlike `.expect`, explicit panic preserves the source location. // Needed as we can't use `RUST_BACKTRACE` in here. @@ -138,32 +194,51 @@ impl<'a, T: Trait> crate::exec::Vm for WasmVm<'a, T> { let mut runtime = Runtime::new( &mut ext, input_data, - &self.schedule, memory, gas_meter, ); + // We store before executing so that the code hash is available in the constructor. + let code = self.code.clone(); + if let &ExportedFunction::Constructor = function { + code_cache::store(self) + } + // Instantiate the instance from the instrumented module code and invoke the contract // entrypoint. - let result = sp_sandbox::Instance::new(&exec.prefab_module.code, &imports, &mut runtime) - .and_then(|mut instance| instance.invoke(exec.entrypoint_name, &[], &mut runtime)); - to_execution_result(runtime, result) + let result = sp_sandbox::Instance::new(&code, &imports, &mut runtime) + .and_then(|mut instance| instance.invoke(function.identifier(), &[], &mut runtime)); + + runtime.to_execution_result(result) + } + + fn code_hash(&self) -> &CodeHash { + &self.code_hash + } + + fn occupied_storage(&self) -> u32 { + // We disregard the size of the struct itself as the size is completely + // dominated by the code size. 
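// A worked example of the amortization computed just below, restated as a
// standalone function (not additional pallet code): with a pristine code length
// of 1_000 bytes, an instrumented length of 1_400 bytes and three live users,
// each contract is accounted 800 bytes; a zero refcount falls back to the full
// length instead of dividing by zero.

fn occupied(original_code_len: u32, code_len: u32, refcount: u64) -> u32 {
    let len = original_code_len.saturating_add(code_len);
    len.checked_div(refcount as u32).unwrap_or(len)
}

fn occupied_storage_example() {
    assert_eq!(occupied(1_000, 1_400, 3), 800);
    assert_eq!(occupied(1_000, 1_400, 0), 2_400); // zero refcount: no division
}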
+ let len = self.original_code_len.saturating_add(self.code.len() as u32); + len.checked_div(self.refcount as u32).unwrap_or(len) } } #[cfg(test)] mod tests { use super::*; + use crate::{ + CodeHash, BalanceOf, Error, Module as Contracts, + exec::{Ext, StorageKey, AccountIdOf, Executable}, + gas::{Gas, GasMeter}, + tests::{Test, Call, ALICE, BOB}, + }; use std::collections::HashMap; use sp_core::H256; - use crate::exec::{Ext, StorageKey}; - use crate::gas::{Gas, GasMeter}; - use crate::tests::{Test, Call}; - use crate::wasm::prepare::prepare_contract; - use crate::{CodeHash, BalanceOf, Error}; use hex_literal::hex; use sp_runtime::DispatchError; - use frame_support::weights::Weight; + use frame_support::{dispatch::DispatchResult, weights::Weight}; + use assert_matches::assert_matches; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags, ExecError, ErrorOrigin}; const GAS_LIMIT: Gas = 10_000_000_000; @@ -173,7 +248,7 @@ mod tests { #[derive(Debug, PartialEq, Eq)] struct RestoreEntry { - dest: u64, + dest: AccountIdOf, code_hash: H256, rent_allowance: u64, delta: Vec, @@ -185,16 +260,17 @@ mod tests { endowment: u64, data: Vec, gas_left: u64, + salt: Vec, } #[derive(Debug, PartialEq, Eq)] struct TerminationEntry { - beneficiary: u64, + beneficiary: AccountIdOf, } #[derive(Debug, PartialEq, Eq)] struct TransferEntry { - to: u64, + to: AccountIdOf, value: u64, data: Vec, } @@ -209,7 +285,7 @@ mod tests { restores: Vec, // (topics, data) events: Vec<(Vec, Vec)>, - next_account_id: u64, + schedule: Schedule, } impl Ext for MockExt { @@ -218,27 +294,27 @@ mod tests { fn get_storage(&self, key: &StorageKey) -> Option> { self.storage.get(key).cloned() } - fn set_storage(&mut self, key: StorageKey, value: Option>) { + fn set_storage(&mut self, key: StorageKey, value: Option>) -> DispatchResult { *self.storage.entry(key).or_insert(Vec::new()) = value.unwrap_or(Vec::new()); + Ok(()) } fn instantiate( &mut self, - code_hash: &CodeHash, + code_hash: CodeHash, endowment: u64, gas_meter: &mut GasMeter, data: Vec, - ) -> Result<(u64, ExecReturnValue), ExecError> { + salt: &[u8], + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { self.instantiates.push(InstantiateEntry { code_hash: code_hash.clone(), endowment, data: data.to_vec(), gas_left: gas_meter.gas_left(), + salt: salt.to_vec(), }); - let address = self.next_account_id; - self.next_account_id += 1; - Ok(( - address, + Contracts::::contract_address(&ALICE, &code_hash, salt), ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new(), @@ -247,11 +323,11 @@ mod tests { } fn transfer( &mut self, - to: &u64, + to: &AccountIdOf, value: u64, ) -> Result<(), DispatchError> { self.transfers.push(TransferEntry { - to: *to, + to: to.clone(), value, data: Vec::new(), }); @@ -259,13 +335,13 @@ mod tests { } fn call( &mut self, - to: &u64, + to: &AccountIdOf, value: u64, _gas_meter: &mut GasMeter, data: Vec, ) -> ExecResult { self.transfers.push(TransferEntry { - to: *to, + to: to.clone(), value, data: data, }); @@ -275,20 +351,20 @@ mod tests { } fn terminate( &mut self, - beneficiary: &u64, + beneficiary: &AccountIdOf, ) -> Result<(), DispatchError> { self.terminations.push(TerminationEntry { - beneficiary: *beneficiary, + beneficiary: beneficiary.clone(), }); Ok(()) } fn restore_to( &mut self, - dest: u64, + dest: AccountIdOf, code_hash: H256, rent_allowance: u64, delta: Vec, - ) -> Result<(), &'static str> { + ) -> Result<(), DispatchError> { self.restores.push(RestoreEntry { dest, code_hash, @@ -297,11 +373,11 @@ mod tests { }); Ok(()) 
} - fn caller(&self) -> &u64 { - &42 + fn caller(&self) -> &AccountIdOf { + &ALICE } - fn address(&self) -> &u64 { - &69 + fn address(&self) -> &AccountIdOf { + &BOB } fn balance(&self) -> u64 { 228 @@ -345,6 +421,10 @@ mod tests { fn get_weight_price(&self, weight: Weight) -> BalanceOf { BalanceOf::::from(1312_u32).saturating_mul(weight.into()) } + + fn schedule(&self) -> &Schedule { + &self.schedule + } } impl Ext for &mut MockExt { @@ -353,34 +433,35 @@ mod tests { fn get_storage(&self, key: &[u8; 32]) -> Option> { (**self).get_storage(key) } - fn set_storage(&mut self, key: [u8; 32], value: Option>) { + fn set_storage(&mut self, key: [u8; 32], value: Option>) -> DispatchResult { (**self).set_storage(key, value) } fn instantiate( &mut self, - code: &CodeHash, + code: CodeHash, value: u64, gas_meter: &mut GasMeter, input_data: Vec, - ) -> Result<(u64, ExecReturnValue), ExecError> { - (**self).instantiate(code, value, gas_meter, input_data) + salt: &[u8], + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { + (**self).instantiate(code, value, gas_meter, input_data, salt) } fn transfer( &mut self, - to: &u64, + to: &AccountIdOf, value: u64, ) -> Result<(), DispatchError> { (**self).transfer(to, value) } fn terminate( &mut self, - beneficiary: &u64, + beneficiary: &AccountIdOf, ) -> Result<(), DispatchError> { (**self).terminate(beneficiary) } fn call( &mut self, - to: &u64, + to: &AccountIdOf, value: u64, gas_meter: &mut GasMeter, input_data: Vec, @@ -389,11 +470,11 @@ mod tests { } fn restore_to( &mut self, - dest: u64, + dest: AccountIdOf, code_hash: H256, rent_allowance: u64, delta: Vec, - ) -> Result<(), &'static str> { + ) -> Result<(), DispatchError> { (**self).restore_to( dest, code_hash, @@ -401,10 +482,10 @@ mod tests { delta, ) } - fn caller(&self) -> &u64 { + fn caller(&self) -> &AccountIdOf { (**self).caller() } - fn address(&self) -> &u64 { + fn address(&self) -> &AccountIdOf { (**self).address() } fn balance(&self) -> u64 { @@ -443,6 +524,9 @@ mod tests { fn get_weight_price(&self, weight: Weight) -> BalanceOf { (**self).get_weight_price(weight) } + fn schedule(&self) -> &Schedule { + (**self).schedule() + } } fn execute( @@ -450,24 +534,15 @@ mod tests { input_data: Vec, ext: E, gas_meter: &mut GasMeter, - ) -> ExecResult { - use crate::exec::Vm; - + ) -> ExecResult + where + ::AccountId: + UncheckedFrom<::Hash> + AsRef<[u8]> + { let wasm = wat::parse_str(wat).unwrap(); let schedule = crate::Schedule::default(); - let prefab_module = - prepare_contract::(&wasm, &schedule).unwrap(); - - let exec = WasmExecutable { - // Use a "call" convention. - entrypoint_name: "call", - prefab_module, - }; - - let cfg = Default::default(); - let vm = WasmVm::new(&cfg); - - vm.execute(&exec, ext, input_data, gas_meter) + let executable = PrefabWasmModule::::from_code(wasm, &schedule).unwrap(); + executable.execute(ext, &ExportedFunction::Call, input_data, gas_meter) } const CODE_TRANSFER: &str = r#" @@ -484,21 +559,23 @@ mod tests { (drop (call $seal_transfer (i32.const 4) ;; Pointer to "account" address. - (i32.const 8) ;; Length of "account" address. - (i32.const 12) ;; Pointer to the buffer with value to transfer + (i32.const 32) ;; Length of "account" address. + (i32.const 36) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. ) ) ) (func (export "deploy")) - ;; Destination AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. 
- (data (i32.const 4) "\07\00\00\00\00\00\00\00") + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 12) "\99\00\00\00\00\00\00\00") + (data (i32.const 36) "\99\00\00\00\00\00\00\00") ) "#; @@ -515,7 +592,7 @@ mod tests { assert_eq!( &mock_ext.transfers, &[TransferEntry { - to: 7, + to: ALICE, value: 153, data: Vec::new(), }] @@ -541,11 +618,11 @@ mod tests { (drop (call $seal_call (i32.const 4) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 12) ;; Pointer to the buffer with value to transfer + (i32.const 36) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 20) ;; Pointer to input data buffer address + (i32.const 44) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case @@ -554,14 +631,17 @@ mod tests { ) (func (export "deploy")) - ;; Destination AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 4) "\09\00\00\00\00\00\00\00") + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 12) "\06\00\00\00\00\00\00\00") + (data (i32.const 36) "\06\00\00\00\00\00\00\00") - (data (i32.const 20) "\01\02\03\04") + (data (i32.const 44) "\01\02\03\04") ) "#; @@ -578,7 +658,7 @@ mod tests { assert_eq!( &mock_ext.transfers, &[TransferEntry { - to: 9, + to: ALICE, value: 6, data: vec![1, 2, 3, 4], }] @@ -601,7 +681,9 @@ mod tests { ;; output_ptr: u32, ;; output_len_ptr: u32 ;; ) -> u32 - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate + (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "env" "memory" (memory 1 1)) (func (export "call") (drop @@ -617,11 +699,15 @@ mod tests { (i32.const 0) ;; Length is ignored in this case (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 4) ;; salt_len ) ) ) (func (export "deploy")) + ;; Salt + (data (i32.const 0) "\42\43\44\45") ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. 
(data (i32.const 4) "\03\00\00\00\00\00\00\00") @@ -645,14 +731,18 @@ mod tests { &mut GasMeter::new(GAS_LIMIT), ).unwrap(); - assert_eq!( - &mock_ext.instantiates, - &[InstantiateEntry { - code_hash: [0x11; 32].into(), + assert_matches!( + &mock_ext.instantiates[..], + [InstantiateEntry { + code_hash, endowment: 3, - data: vec![1, 2, 3, 4], - gas_left: 9392302058, - }] + data, + gas_left: _, + salt, + }] if + code_hash == &[0x11; 32].into() && + data == &vec![1, 2, 3, 4] && + salt == &vec![0x42, 0x43, 0x44, 0x45] ); } @@ -667,14 +757,16 @@ mod tests { (func (export "call") (call $seal_terminate (i32.const 4) ;; Pointer to "beneficiary" address. - (i32.const 8) ;; Length of "beneficiary" address. + (i32.const 32) ;; Length of "beneficiary" address. ) ) (func (export "deploy")) ;; Beneficiary AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 4) "\09\00\00\00\00\00\00\00") + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) ) "#; @@ -691,7 +783,7 @@ mod tests { assert_eq!( &mock_ext.terminations, &[TerminationEntry { - beneficiary: 0x09, + beneficiary: ALICE, }] ); } @@ -715,11 +807,11 @@ mod tests { (drop (call $seal_call (i32.const 4) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 228) ;; How much gas to devote for the execution. - (i32.const 12) ;; Pointer to the buffer with value to transfer + (i32.const 36) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 20) ;; Pointer to input data buffer address + (i32.const 44) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this cas @@ -729,13 +821,15 @@ mod tests { (func (export "deploy")) ;; Destination AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 4) "\09\00\00\00\00\00\00\00") + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 12) "\06\00\00\00\00\00\00\00") + (data (i32.const 36) "\06\00\00\00\00\00\00\00") - (data (i32.const 20) "\01\02\03\04") + (data (i32.const 44) "\01\02\03\04") ) "#; @@ -752,7 +846,7 @@ mod tests { assert_eq!( &mock_ext.transfers, &[TransferEntry { - to: 9, + to: ALICE, value: 6, data: vec![1, 2, 3, 4], }] @@ -862,19 +956,19 @@ mod tests { ;; fill the buffer with the caller. (call $seal_caller (i32.const 0) (i32.const 32)) - ;; assert len == 8 + ;; assert len == 32 (call $assert (i32.eq (i32.load (i32.const 32)) - (i32.const 8) + (i32.const 32) ) ) - ;; assert that contents of the buffer is equal to the i64 value of 42. + ;; assert that the first 64 byte are the beginning of "ALICE" (call $assert (i64.eq (i64.load (i32.const 0)) - (i64.const 42) + (i64.const 0x0101010101010101) ) ) ) @@ -915,19 +1009,19 @@ mod tests { ;; fill the buffer with the self address. (call $seal_address (i32.const 0) (i32.const 32)) - ;; assert size == 8 + ;; assert size == 32 (call $assert (i32.eq (i32.load (i32.const 32)) - (i32.const 8) + (i32.const 32) ) ) - ;; assert that contents of the buffer is equal to the i64 value of 69. 
+ ;; assert that the first 64 byte are the beginning of "BOB" (call $assert (i64.eq (i64.load (i32.const 0)) - (i64.const 69) + (i64.const 0x0202020202020202) ) ) ) @@ -1461,7 +1555,7 @@ mod tests { vec![0x00, 0x01, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x14, 0x00]) ]); - assert_eq!(gas_meter.gas_left(), 9834099446); + assert!(gas_meter.gas_left() > 0); } const CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#" @@ -1504,7 +1598,7 @@ mod tests { &mut gas_meter ), Err(ExecError { - error: Error::::ContractTrapped.into(), + error: Error::::TooManyTopics.into(), origin: ErrorOrigin::Caller, }) ); @@ -1549,7 +1643,7 @@ mod tests { &mut gas_meter ), Err(ExecError { - error: Error::::ContractTrapped.into(), + error: Error::::DuplicateTopics.into(), origin: ErrorOrigin::Caller, }) ); diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 171fca6339fd3..caf6ef88c1ba0 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -1,32 +1,32 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! This module takes care of loading, checking and preprocessing of a //! wasm module before execution. It also extracts some essential information //! from a module. -use crate::wasm::env_def::ImportSatisfyCheck; -use crate::wasm::PrefabWasmModule; -use crate::{Schedule, Trait}; - +use crate::{ + Schedule, Config, + chain_extension::ChainExtension, + wasm::{PrefabWasmModule, env_def::ImportSatisfyCheck}, +}; use parity_wasm::elements::{self, Internal, External, MemoryType, Type, ValueType}; -use pwasm_utils; -use pwasm_utils::rules; +use sp_runtime::traits::Hash; use sp_std::prelude::*; -use sp_runtime::traits::{SaturatedConversion}; /// Currently, all imported functions must be located inside this module. We might support /// additional modules for versioning later. @@ -36,13 +36,13 @@ pub const IMPORT_MODULE_FN: &str = "seal0"; /// compiler toolchains might not support specifying other modules than "env" for memory imports. pub const IMPORT_MODULE_MEMORY: &str = "env"; -struct ContractModule<'a, T: Trait> { +struct ContractModule<'a, T: Config> { /// A deserialized module. The module is valid (this is Guaranteed by `new` method). 
module: elements::Module, schedule: &'a Schedule, } -impl<'a, T: Trait> ContractModule<'a, T> { +impl<'a, T: Config> ContractModule<'a, T> { /// Creates a new instance of `ContractModule`. /// /// Returns `Err` if the `original_code` couldn't be decoded or @@ -101,6 +101,33 @@ impl<'a, T: Trait> ContractModule<'a, T> { Ok(()) } + /// Ensure that any `br_table` instruction adheres to its immediate value limit. + fn ensure_br_table_size_limit(&self, limit: u32) -> Result<(), &'static str> { + let code_section = if let Some(type_section) = self.module.code_section() { + type_section + } else { + return Ok(()); + }; + for instr in code_section.bodies().iter().flat_map(|body| body.code().elements()) { + use parity_wasm::elements::Instruction::BrTable; + if let BrTable(table) = instr { + if table.table.len() > limit as usize { + return Err("BrTable's immediate value is too big.") + } + } + } + Ok(()) + } + + fn ensure_global_variable_limit(&self, limit: u32) -> Result<(), &'static str> { + if let Some(global_section) = self.module.global_section() { + if global_section.entries().len() > limit as usize { + return Err("module declares too many globals") + } + } + Ok(()) + } + /// Ensures that no floating point types are in use. fn ensure_no_floating_types(&self) -> Result<(), &'static str> { if let Some(global_section) = self.module.global_section() { @@ -145,15 +172,25 @@ impl<'a, T: Trait> ContractModule<'a, T> { Ok(()) } - fn inject_gas_metering(self) -> Result { - let gas_rules = - rules::Set::new( - self.schedule.instruction_weights.regular.clone().saturated_into(), - Default::default(), - ) - .with_grow_cost(self.schedule.instruction_weights.grow_mem.clone().saturated_into()) - .with_forbidden_floats(); + /// Ensure that no function exists that has more parameters than allowed. 
+ fn ensure_parameter_limit(&self, limit: u32) -> Result<(), &'static str> { + let type_section = if let Some(type_section) = self.module.type_section() { + type_section + } else { + return Ok(()); + }; + for Type::Function(func) in type_section.types() { + if func.params().len() > limit as usize { + return Err("Use of a function type with too many parameters."); + } + } + + Ok(()) + } + + fn inject_gas_metering(self) -> Result { + let gas_rules = self.schedule.rules(&self.module); let contract_module = pwasm_utils::inject_gas_counter( self.module, &gas_rules, @@ -167,7 +204,8 @@ impl<'a, T: Trait> ContractModule<'a, T> { fn inject_stack_height_metering(self) -> Result { let contract_module = - pwasm_utils::stack_height::inject_limiter(self.module, self.schedule.max_stack_height) + pwasm_utils::stack_height + ::inject_limiter(self.module, self.schedule.limits.stack_height) .map_err(|_| "stack height instrumentation failed")?; Ok(ContractModule { module: contract_module, @@ -318,6 +356,12 @@ impl<'a, T: Trait> ContractModule<'a, T> { return Err("module imports `seal_println` but debug features disabled"); } + if !T::ChainExtension::enabled() && + import.field().as_bytes() == b"seal_call_chain_extension" + { + return Err("module uses chain extensions but chain extensions are disabled"); + } + if import_fn_banlist.iter().any(|f| import.field().as_bytes() == *f) || !C::can_satisfy(import.field().as_bytes(), func_ty) { @@ -333,7 +377,7 @@ impl<'a, T: Trait> ContractModule<'a, T> { } } -fn get_memory_limits(module: Option<&MemoryType>, schedule: &Schedule) +fn get_memory_limits(module: Option<&MemoryType>, schedule: &Schedule) -> Result<(u32, u32), &'static str> { if let Some(memory_type) = module { @@ -345,7 +389,7 @@ fn get_memory_limits(module: Option<&MemoryType>, schedule: &Schedule< "Requested initial number of pages should not exceed the requested maximum", ); } - (_, Some(maximum)) if maximum > schedule.max_memory_pages => { + (_, Some(maximum)) if maximum > schedule.limits.memory_pages => { return Err("Maximum number of pages should not exceed the configured maximum."); } (initial, Some(maximum)) => Ok((initial, maximum)), @@ -363,26 +407,18 @@ fn get_memory_limits(module: Option<&MemoryType>, schedule: &Schedule< } } -/// Loads the given module given in `original_code`, performs some checks on it and -/// does some preprocessing. -/// -/// The checks are: -/// -/// - provided code is a valid wasm module. -/// - the module doesn't define an internal memory instance, -/// - imported memory (if any) doesn't reserve more memory than permitted by the `schedule`, -/// - all imported functions from the external environment matches defined by `env` module, -/// -/// The preprocessing includes injecting code for gas metering and metering the height of stack. 
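// The memory-limit rules enforced by `get_memory_limits` above can be restated
// as a small standalone decision function. The error strings are the ones used
// in this file; the behaviour for a module that imports no memory at all
// (treated here as `(0, 0)`) is an assumption, since that branch is not shown
// in this hunk.

fn memory_limits(
    declared: Option<(u32, Option<u32>)>,
    configured_maximum: u32,
) -> Result<(u32, u32), &'static str> {
    match declared {
        Some((initial, Some(maximum))) if initial > maximum => Err(
            "Requested initial number of pages should not exceed the requested maximum",
        ),
        Some((_, Some(maximum))) if maximum > configured_maximum => Err(
            "Maximum number of pages should not exceed the configured maximum.",
        ),
        Some((initial, Some(maximum))) => Ok((initial, maximum)),
        Some((_, None)) => Err("Maximum number of pages should be always declared."),
        // Assumed: a module without a memory import gets an empty sandbox memory.
        None => Ok((0, 0)),
    }
}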
-pub fn prepare_contract( +fn check_and_instrument( original_code: &[u8], schedule: &Schedule, -) -> Result { - let mut contract_module = ContractModule::new(original_code, schedule)?; +) -> Result<(Vec, (u32, u32)), &'static str> { + let contract_module = ContractModule::new(&original_code, schedule)?; contract_module.scan_exports()?; contract_module.ensure_no_internal_memory()?; - contract_module.ensure_table_size_limit(schedule.max_table_size)?; + contract_module.ensure_table_size_limit(schedule.limits.table_size)?; + contract_module.ensure_global_variable_limit(schedule.limits.globals)?; contract_module.ensure_no_floating_types()?; + contract_module.ensure_parameter_limit(schedule.limits.parameters)?; + contract_module.ensure_br_table_size_limit(schedule.limits.br_table_size)?; // We disallow importing `gas` function here since it is treated as implementation detail. let disallowed_imports = [b"gas".as_ref()]; @@ -391,19 +427,65 @@ pub fn prepare_contract( schedule )?; - contract_module = contract_module + let code = contract_module .inject_gas_metering()? - .inject_stack_height_metering()?; + .inject_stack_height_metering()? + .into_wasm_code()?; + Ok((code, memory_limits)) +} + +fn do_preparation( + original_code: Vec, + schedule: &Schedule, +) -> Result, &'static str> { + let (code, (initial, maximum)) = check_and_instrument::( + original_code.as_ref(), + schedule, + )?; Ok(PrefabWasmModule { schedule_version: schedule.version, - initial: memory_limits.0, - maximum: memory_limits.1, + initial, + maximum, _reserved: None, - code: contract_module.into_wasm_code()?, + code, + original_code_len: original_code.len() as u32, + refcount: 1, + code_hash: T::Hashing::hash(&original_code), + original_code: Some(original_code), }) } +/// Loads the given module given in `original_code`, performs some checks on it and +/// does some preprocessing. +/// +/// The checks are: +/// +/// - provided code is a valid wasm module. +/// - the module doesn't define an internal memory instance, +/// - imported memory (if any) doesn't reserve more memory than permitted by the `schedule`, +/// - all imported functions from the external environment matches defined by `env` module, +/// +/// The preprocessing includes injecting code for gas metering and metering the height of stack. +pub fn prepare_contract( + original_code: Vec, + schedule: &Schedule, +) -> Result, &'static str> { + do_preparation::(original_code, schedule) +} + +/// The same as [`prepare_contract`] but without constructing a new [`PrefabWasmModule`] +/// +/// # Note +/// +/// Use this when an existing contract should be re-instrumented with a newer schedule version. +pub fn reinstrument_contract( + original_code: Vec, + schedule: &Schedule, +) -> Result, &'static str> { + Ok(check_and_instrument::(&original_code, schedule)?.0) +} + /// Alternate (possibly unsafe) preparation functions used only for benchmarking. /// /// For benchmarking we need to construct special contracts that might not pass our @@ -412,9 +494,7 @@ pub fn prepare_contract( /// in production code. #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking { - use super::{ - Trait, ContractModule, PrefabWasmModule, ImportSatisfyCheck, Schedule, get_memory_limits - }; + use super::*; use parity_wasm::elements::FunctionType; impl ImportSatisfyCheck for () { @@ -424,10 +504,10 @@ pub mod benchmarking { } /// Prepare function that neither checks nor instruments the passed in code. 
- pub fn prepare_contract(original_code: &[u8], schedule: &Schedule) - -> Result + pub fn prepare_contract(original_code: Vec, schedule: &Schedule) + -> Result, &'static str> { - let contract_module = ContractModule::new(original_code, schedule)?; + let contract_module = ContractModule::new(&original_code, schedule)?; let memory_limits = get_memory_limits(contract_module.scan_imports::<()>(&[])?, schedule)?; Ok(PrefabWasmModule { schedule_version: schedule.version, @@ -435,6 +515,10 @@ pub mod benchmarking { maximum: memory_limits.1, _reserved: None, code: contract_module.into_wasm_code()?, + original_code_len: original_code.len() as u32, + refcount: 1, + code_hash: T::Hashing::hash(&original_code), + original_code: Some(original_code), }) } } @@ -442,36 +526,52 @@ pub mod benchmarking { #[cfg(test)] mod tests { use super::*; - use crate::exec::Ext; + use crate::{exec::Ext, Limits}; use std::fmt; use assert_matches::assert_matches; - impl fmt::Debug for PrefabWasmModule { + impl fmt::Debug for PrefabWasmModule { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "PreparedContract {{ .. }}") } } - // Define test environment for tests. We need ImportSatisfyCheck - // implementation from it. So actual implementations doesn't matter. - define_env!(TestEnv, , - panic(_ctx) => { unreachable!(); }, + /// Using unreachable statements triggers unreachable warnings in the generated code + #[allow(unreachable_code)] + mod env { + use super::*; - // gas is an implementation defined function and a contract can't import it. - gas(_ctx, _amount: u32) => { unreachable!(); }, + // Define test environment for tests. We need ImportSatisfyCheck + // implementation from it. So actual implementations doesn't matter. + define_env!(Test, , + panic(_ctx) => { unreachable!(); }, - nop(_ctx, _unused: u64) => { unreachable!(); }, + // gas is an implementation defined function and a contract can't import it. + gas(_ctx, _amount: u32) => { unreachable!(); }, - seal_println(_ctx, _ptr: u32, _len: u32) => { unreachable!(); }, - ); + nop(_ctx, _unused: u64) => { unreachable!(); }, + + seal_println(_ctx, _ptr: u32, _len: u32) => { unreachable!(); }, + ); + } macro_rules! prepare_test { ($name:ident, $wat:expr, $($expected:tt)*) => { #[test] fn $name() { let wasm = wat::parse_str($wat).unwrap(); - let schedule = Schedule::default(); - let r = prepare_contract::(wasm.as_ref(), &schedule); + let schedule = Schedule { + limits: Limits { + globals: 3, + parameters: 3, + memory_pages: 16, + table_size: 3, + br_table_size: 3, + .. Default::default() + }, + .. Default::default() + }; + let r = do_preparation::(wasm, &schedule); assert_matches!(r, $($expected)*); } }; @@ -493,14 +593,66 @@ mod tests { Err("gas instrumentation failed") ); - mod memories { + mod functions { use super::*; - // Tests below assumes that maximum page number is configured to a certain number. 
- #[test] - fn assume_memory_size() { - assert_eq!(>::default().max_memory_pages, 16); - } + prepare_test!(param_number_valid, + r#" + (module + (func (export "call")) + (func (export "deploy")) + (func (param i32 i32 i32)) + ) + "#, + Ok(_) + ); + + prepare_test!(param_number_invalid, + r#" + (module + (func (export "call")) + (func (export "deploy")) + (func (param i32 i32 i32 i32)) + (func (param i32)) + ) + "#, + Err("Use of a function type with too many parameters.") + ); + } + + mod globals { + use super::*; + + prepare_test!(global_number_valid, + r#" + (module + (global i64 (i64.const 0)) + (global i64 (i64.const 0)) + (global i64 (i64.const 0)) + (func (export "call")) + (func (export "deploy")) + ) + "#, + Ok(_) + ); + + prepare_test!(global_number_too_high, + r#" + (module + (global i64 (i64.const 0)) + (global i64 (i64.const 0)) + (global i64 (i64.const 0)) + (global i64 (i64.const 0)) + (func (export "call")) + (func (export "deploy")) + ) + "#, + Err("module declares too many globals") + ); + } + + mod memories { + use super::*; prepare_test!(memory_with_one_page, r#" @@ -561,6 +713,18 @@ mod tests { Err("Maximum number of pages should be always declared.") ); + prepare_test!(requested_maximum_valid, + r#" + (module + (import "env" "memory" (memory 1 16)) + + (func (export "call")) + (func (export "deploy")) + ) + "#, + Ok(_) + ); + prepare_test!(requested_maximum_exceeds_configured_maximum, r#" (module @@ -625,12 +789,6 @@ mod tests { mod tables { use super::*; - // Tests below assumes that maximum table size is configured to a certain number. - #[test] - fn assume_table_size() { - assert_eq!(>::default().max_table_size, 16384); - } - prepare_test!(no_tables, r#" (module @@ -644,7 +802,7 @@ mod tests { prepare_test!(table_valid_size, r#" (module - (table 10000 funcref) + (table 3 funcref) (func (export "call")) (func (export "deploy")) @@ -656,13 +814,40 @@ mod tests { prepare_test!(table_too_big, r#" (module - (table 20000 funcref) + (table 4 funcref) (func (export "call")) (func (export "deploy")) )"#, Err("table exceeds maximum size allowed") ); + + prepare_test!(br_table_valid_size, + r#" + (module + (func (export "call")) + (func (export "deploy")) + (func + i32.const 0 + br_table 0 0 0 0 + ) + ) + "#, + Ok(_) + ); + + prepare_test!(br_table_too_big, + r#" + (module + (func (export "call")) + (func (export "deploy")) + (func + i32.const 0 + br_table 0 0 0 0 0 + ) + )"#, + Err("BrTable's immediate value is too big.") + ); } mod imports { @@ -797,7 +982,7 @@ mod tests { ).unwrap(); let mut schedule = Schedule::default(); schedule.enable_println = true; - let r = prepare_contract::(wasm.as_ref(), &schedule); + let r = do_preparation::(wasm, &schedule); assert_matches!(r, Ok(_)); } } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index ffc816aa4c82f..9dd098e852666 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -1,34 +1,34 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Environment definition of the wasm smart-contract runtime. use crate::{ - HostFnWeights, Schedule, Trait, CodeHash, BalanceOf, Error, + HostFnWeights, Config, CodeHash, BalanceOf, Error, exec::{Ext, StorageKey, TopicOf}, - gas::{Gas, GasMeter, Token, GasMeterResult}, + gas::{Gas, GasMeter, Token, GasMeterResult, ChargedAmount}, wasm::env_def::ConvertibleToWasm, }; -use sp_sandbox; use parity_wasm::elements::ValueType; -use frame_system; -use frame_support::dispatch::DispatchError; +use frame_support::{dispatch::DispatchError, ensure}; use sp_std::prelude::*; -use codec::{Decode, Encode}; +use codec::{Decode, DecodeAll, Encode}; use sp_runtime::traits::SaturatedConversion; +use sp_core::crypto::UncheckedFrom; use sp_io::hashing::{ keccak_256, blake2_256, @@ -38,6 +38,13 @@ use sp_io::hashing::{ use pallet_contracts_primitives::{ExecResult, ExecReturnValue, ReturnFlags, ExecError}; /// Every error that can be returned to a contract when it calls any of the host functions. +/// +/// # Note +/// +/// This enum can be extended in the future: New codes can be added but existing codes +/// will not be changed or removed. This means that any contract **must not** exhaustively +/// match return codes. Instead, contracts should prepare for unknown variants and deal with +/// those errors gracefuly in order to be forward compatible. #[repr(u32)] pub enum ReturnCode { /// API call successful. @@ -89,7 +96,7 @@ impl From for ReturnCode { } /// The data passed through when a contract uses `seal_return`. -struct ReturnData { +pub struct ReturnData { /// The flags as passed through by the contract. They are still unchecked and /// will later be parsed into a `ReturnFlags` bitflags struct. flags: u32, @@ -103,7 +110,7 @@ struct ReturnData { /// occurred (the SupervisorError variant). /// The other case is where the trap does not constitute an error but rather was invoked /// as a quick way to terminate the application (all other variants). -enum TrapReason { +pub enum TrapReason { /// The supervisor trapped the contract because of an error condition occurred during /// execution in privileged code. SupervisorError(DispatchError), @@ -116,89 +123,9 @@ enum TrapReason { Restoration, } -/// Can only be used for one call. 
-pub(crate) struct Runtime<'a, E: Ext + 'a> { - ext: &'a mut E, - input_data: Option>, - schedule: &'a Schedule, - memory: sp_sandbox::Memory, - gas_meter: &'a mut GasMeter, - trap_reason: Option, -} -impl<'a, E: Ext + 'a> Runtime<'a, E> { - pub(crate) fn new( - ext: &'a mut E, - input_data: Vec, - schedule: &'a Schedule, - memory: sp_sandbox::Memory, - gas_meter: &'a mut GasMeter, - ) -> Self { - Runtime { - ext, - input_data: Some(input_data), - schedule, - memory, - gas_meter, - trap_reason: None, - } - } -} - -/// Converts the sandbox result and the runtime state into the execution outcome. -/// -/// It evaluates information stored in the `trap_reason` variable of the runtime and -/// bases the outcome on the value if this variable. Only if `trap_reason` is `None` -/// the result of the sandbox is evaluated. -pub(crate) fn to_execution_result( - runtime: Runtime, - sandbox_result: Result, -) -> ExecResult { - // If a trap reason is set we base our decision solely on that. - if let Some(trap_reason) = runtime.trap_reason { - return match trap_reason { - // The trap was the result of the execution `return` host function. - TrapReason::Return(ReturnData{ flags, data }) => { - let flags = ReturnFlags::from_bits(flags).ok_or_else(|| - "used reserved bit in return flags" - )?; - Ok(ExecReturnValue { - flags, - data, - }) - }, - TrapReason::Termination => { - Ok(ExecReturnValue { - flags: ReturnFlags::empty(), - data: Vec::new(), - }) - }, - TrapReason::Restoration => { - Ok(ExecReturnValue { - flags: ReturnFlags::empty(), - data: Vec::new(), - }) - }, - TrapReason::SupervisorError(error) => Err(error)?, - } - } - - // Check the exact type of the error. - match sandbox_result { - // No traps were generated. Proceed normally. - Ok(_) => { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) - } - // `Error::Module` is returned only if instantiation or linking failed (i.e. - // wasm binary tried to import a function that is not provided by the host). - // This shouldn't happen because validation process ought to reject such binaries. - // - // Because panics are really undesirable in the runtime code, we treat this as - // a trap for now. Eventually, we might want to revisit this. - Err(sp_sandbox::Error::Module) => - Err("validation error")?, - // Any other kind of a trap should result in a failure. - Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => - Err(Error::::ContractTrapped)? +impl> From for TrapReason { + fn from(from: T) -> Self { + Self::SupervisorError(from.into()) } } @@ -262,10 +189,10 @@ pub enum RuntimeToken { CallSurchargeTransfer, /// Weight of output received through `seal_call` for the given size. CallCopyOut(u32), - /// Weight of calling `seal_instantiate` for the given input size without output weight. + /// Weight of calling `seal_instantiate` for the given input and salt without output weight. /// This includes the transfer as an instantiate without a value will always be below /// the existential deposit and is disregarded as corner case. - InstantiateBase(u32), + InstantiateBase{input_data_len: u32, salt_len: u32}, /// Weight of output received through `seal_instantiate` for the given size. InstantiateCopyOut(u32), /// Weight of calling `seal_hash_sha_256` for the given input size. @@ -276,10 +203,17 @@ pub enum RuntimeToken { HashBlake256(u32), /// Weight of calling `seal_hash_blake2_128` for the given input size. HashBlake128(u32), + /// Weight charged by a chain extension through `seal_call_chain_extension`. 
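// A worked example of the `InstantiateBase` pricing documented above, using
// made-up per-byte weights purely for illustration (the real values are the
// benchmarked `HostFnWeights` of the schedule, and the struct below is not the
// real type): the charge is the flat instantiate weight plus a per-input-byte
// and a per-salt-byte component, combined with saturating arithmetic.

type Gas = u64;

struct InstantiateWeights {
    instantiate: Gas,
    instantiate_per_input_byte: Gas,
    instantiate_per_salt_byte: Gas,
}

fn instantiate_base_weight(s: &InstantiateWeights, input_data_len: u32, salt_len: u32) -> Gas {
    s.instantiate
        .saturating_add(s.instantiate_per_input_byte.saturating_mul(input_data_len.into()))
        .saturating_add(s.instantiate_per_salt_byte.saturating_mul(salt_len.into()))
}

fn instantiate_base_weight_example() {
    let s = InstantiateWeights {
        instantiate: 1_000,
        instantiate_per_input_byte: 2,
        instantiate_per_salt_byte: 3,
    };
    // 1_000 + 10 * 2 + 4 * 3
    assert_eq!(instantiate_base_weight(&s, 10, 4), 1_032);
}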
+ ChainExtension(u64), + /// Weight charged for copying data from the sandbox. + CopyIn(u32), } -impl Token for RuntimeToken { - type Metadata = HostFnWeights; +impl Token for RuntimeToken +where + T::AccountId: UncheckedFrom, T::AccountId: AsRef<[u8]> +{ + type Metadata = HostFnWeights; fn calculate_amount(&self, s: &Self::Metadata) -> Gas { use self::RuntimeToken::*; @@ -318,8 +252,9 @@ impl Token for RuntimeToken { .saturating_add(s.call_per_input_byte.saturating_mul(len.into())), CallSurchargeTransfer => s.call_transfer_surcharge, CallCopyOut(len) => s.call_per_output_byte.saturating_mul(len.into()), - InstantiateBase(len) => s.instantiate - .saturating_add(s.instantiate_per_input_byte.saturating_mul(len.into())), + InstantiateBase{input_data_len, salt_len} => s.instantiate + .saturating_add(s.instantiate_per_input_byte.saturating_mul(input_data_len.into())) + .saturating_add(s.instantiate_per_salt_byte.saturating_mul(salt_len.into())), InstantiateCopyOut(len) => s.instantiate_per_output_byte .saturating_mul(len.into()), HashSha256(len) => s.hash_sha2_256 @@ -330,219 +265,337 @@ impl Token for RuntimeToken { .saturating_add(s.hash_blake2_256_per_byte.saturating_mul(len.into())), HashBlake128(len) => s.hash_blake2_128 .saturating_add(s.hash_blake2_128_per_byte.saturating_mul(len.into())), + ChainExtension(amount) => amount, + CopyIn(len) => s.return_per_byte.saturating_mul(len.into()), } } } -/// Charge the gas meter with the specified token. -/// -/// Returns `Err(HostError)` if there is not enough gas. -fn charge_gas(ctx: &mut Runtime, token: Tok) -> Result<(), sp_sandbox::HostError> -where - E: Ext, - Tok: Token, -{ - match ctx.gas_meter.charge(&ctx.schedule.host_fn_weights, token) { - GasMeterResult::Proceed => Ok(()), - GasMeterResult::OutOfGas => { - ctx.trap_reason = Some(TrapReason::SupervisorError(Error::::OutOfGas.into())); - Err(sp_sandbox::HostError) - }, - } +/// This is only appropriate when writing out data of constant size that does not depend on user +/// input. In this case the costs for this copy was already charged as part of the token at +/// the beginning of the API entry point. +fn already_charged(_: u32) -> Option { + None } -/// Read designated chunk from the sandbox memory. -/// -/// Returns `Err` if one of the following conditions occurs: +/// Finds duplicates in a given vector. /// -/// - requested buffer is not within the bounds of the sandbox memory. -fn read_sandbox_memory( - ctx: &mut Runtime, - ptr: u32, - len: u32, -) -> Result, sp_sandbox::HostError> { - let mut buf = vec![0u8; len as usize]; - ctx.memory.get(ptr, buf.as_mut_slice()) - .map_err(|_| store_err(ctx, Error::::OutOfBounds))?; - Ok(buf) +/// This function has complexity of O(n log n) and no additional memory is required, although +/// the order of items is not preserved. +fn has_duplicates>(items: &mut Vec) -> bool { + // Sort the vector + items.sort_by(|a, b| { + Ord::cmp(a.as_ref(), b.as_ref()) + }); + // And then find any two consecutive equal elements. + items.windows(2).any(|w| { + match w { + &[ref a, ref b] => a == b, + _ => false, + } + }) } -/// Read designated chunk from the sandbox memory into the supplied buffer. -/// -/// Returns `Err` if one of the following conditions occurs: -/// -/// - requested buffer is not within the bounds of the sandbox memory. 
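// Usage sketch for the `has_duplicates` helper above. A compact standalone
// equivalent is repeated here so the example runs on its own; note that the
// input is sorted in place, so the caller's ordering is deliberately not
// preserved.

fn has_duplicates<T: AsRef<[u8]>>(items: &mut Vec<T>) -> bool {
    items.sort_by(|a, b| Ord::cmp(a.as_ref(), b.as_ref()));
    items.windows(2).any(|w| w[0].as_ref() == w[1].as_ref())
}

fn demo_has_duplicates() {
    let mut topics = vec![vec![2u8; 32], vec![1u8; 32], vec![2u8; 32]];
    assert!(has_duplicates(&mut topics));

    let mut topics = vec![vec![1u8; 32], vec![2u8; 32], vec![3u8; 32]];
    assert!(!has_duplicates(&mut topics));
}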
-fn read_sandbox_memory_into_buf( - ctx: &mut Runtime, - ptr: u32, - buf: &mut [u8], -) -> Result<(), sp_sandbox::HostError> { - ctx.memory.get(ptr, buf).map_err(|_| store_err(ctx, Error::::OutOfBounds)) +/// Can only be used for one call. +pub struct Runtime<'a, E: Ext + 'a> { + ext: &'a mut E, + input_data: Option>, + memory: sp_sandbox::Memory, + gas_meter: &'a mut GasMeter, + trap_reason: Option, } -/// Read designated chunk from the sandbox memory and attempt to decode into the specified type. -/// -/// Returns `Err` if one of the following conditions occurs: -/// -/// - requested buffer is not within the bounds of the sandbox memory. -/// - the buffer contents cannot be decoded as the required type. -fn read_sandbox_memory_as( - ctx: &mut Runtime, - ptr: u32, - len: u32, -) -> Result { - let buf = read_sandbox_memory(ctx, ptr, len)?; - D::decode(&mut &buf[..]).map_err(|_| store_err(ctx, Error::::DecodingFailed)) -} +impl<'a, E> Runtime<'a, E> +where + E: Ext + 'a, + ::AccountId: + UncheckedFrom<::Hash> + AsRef<[u8]> +{ + pub fn new( + ext: &'a mut E, + input_data: Vec, + memory: sp_sandbox::Memory, + gas_meter: &'a mut GasMeter, + ) -> Self { + Runtime { + ext, + input_data: Some(input_data), + memory, + gas_meter, + trap_reason: None, + } + } -/// Write the given buffer to the designated location in the sandbox memory. -/// -/// Returns `Err` if one of the following conditions occurs: -/// -/// - designated area is not within the bounds of the sandbox memory. -fn write_sandbox_memory( - ctx: &mut Runtime, - ptr: u32, - buf: &[u8], -) -> Result<(), sp_sandbox::HostError> { - ctx.memory.set(ptr, buf).map_err(|_| store_err(ctx, Error::::OutOfBounds)) -} + /// Converts the sandbox result and the runtime state into the execution outcome. + /// + /// It evaluates information stored in the `trap_reason` variable of the runtime and + /// bases the outcome on the value if this variable. Only if `trap_reason` is `None` + /// the result of the sandbox is evaluated. + pub fn to_execution_result( + self, + sandbox_result: Result, + ) -> ExecResult { + // If a trap reason is set we base our decision solely on that. + if let Some(trap_reason) = self.trap_reason { + return match trap_reason { + // The trap was the result of the execution `return` host function. + TrapReason::Return(ReturnData{ flags, data }) => { + let flags = ReturnFlags::from_bits(flags).ok_or_else(|| + "used reserved bit in return flags" + )?; + Ok(ExecReturnValue { + flags, + data, + }) + }, + TrapReason::Termination => { + Ok(ExecReturnValue { + flags: ReturnFlags::empty(), + data: Vec::new(), + }) + }, + TrapReason::Restoration => { + Ok(ExecReturnValue { + flags: ReturnFlags::empty(), + data: Vec::new(), + }) + }, + TrapReason::SupervisorError(error) => Err(error)?, + } + } -/// Write the given buffer and its length to the designated locations in sandbox memory and -/// charge gas according to the token returned by `create_token`. -// -/// `out_ptr` is the location in sandbox memory where `buf` should be written to. -/// `out_len_ptr` is an in-out location in sandbox memory. It is read to determine the -/// length of the buffer located at `out_ptr`. If that buffer is large enough the actual -/// `buf.len()` is written to this location. -/// -/// If `out_ptr` is set to the sentinel value of `u32::max_value()` and `allow_skip` is true the -/// operation is skipped and `Ok` is returned. This is supposed to help callers to make copying -/// output optional. 
For example to skip copying back the output buffer of an `seal_call` -/// when the caller is not interested in the result. -/// -/// `create_token` can optionally instruct this function to charge the gas meter with the token -/// it returns. `create_token` receives the variable amount of bytes that are about to be copied by -/// this function. -/// -/// In addition to the error conditions of `write_sandbox_memory` this functions returns -/// `Err` if the size of the buffer located at `out_ptr` is too small to fit `buf`. -fn write_sandbox_output( - ctx: &mut Runtime, - out_ptr: u32, - out_len_ptr: u32, - buf: &[u8], - allow_skip: bool, - create_token: impl FnOnce(u32) -> Option, -) -> Result<(), sp_sandbox::HostError> { - if allow_skip && out_ptr == u32::max_value() { - return Ok(()); + // Check the exact type of the error. + match sandbox_result { + // No traps were generated. Proceed normally. + Ok(_) => { + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + } + // `Error::Module` is returned only if instantiation or linking failed (i.e. + // wasm binary tried to import a function that is not provided by the host). + // This shouldn't happen because validation process ought to reject such binaries. + // + // Because panics are really undesirable in the runtime code, we treat this as + // a trap for now. Eventually, we might want to revisit this. + Err(sp_sandbox::Error::Module) => + Err("validation error")?, + // Any other kind of a trap should result in a failure. + Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => + Err(Error::::ContractTrapped)? + } } - let buf_len = buf.len() as u32; - let len: u32 = read_sandbox_memory_as(ctx, out_len_ptr, 4)?; + /// Get a mutable reference to the inner `Ext`. + /// + /// This is mainly for the chain extension to have access to the environment the + /// contract is executing in. + pub fn ext(&mut self) -> &mut E { + self.ext + } - if len < buf_len { - Err(store_err(ctx, Error::::OutputBufferTooSmall))? + /// Store the reason for a host function triggered trap. + /// + /// This is called by the `define_env` macro in order to store any error returned by + /// the host functions defined through the said macro. It should **not** be called + /// manually. + pub fn set_trap_reason(&mut self, reason: TrapReason) { + self.trap_reason = Some(reason); } - if let Some(token) = create_token(buf_len) { - charge_gas(ctx, token)?; + /// Charge the gas meter with the specified token. + /// + /// Returns `Err(HostError)` if there is not enough gas. + pub fn charge_gas(&mut self, token: Tok) -> Result + where + Tok: Token>, + { + match self.gas_meter.charge(&self.ext.schedule().host_fn_weights, token) { + GasMeterResult::Proceed(amount) => Ok(amount), + GasMeterResult::OutOfGas => Err(Error::::OutOfGas.into()) + } } - ctx.memory.set(out_ptr, buf).and_then(|_| { - ctx.memory.set(out_len_ptr, &buf_len.encode()) - }) - .map_err(|_| store_err(ctx, Error::::OutOfBounds))?; + /// Read designated chunk from the sandbox memory. + /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - requested buffer is not within the bounds of the sandbox memory. 
+ pub fn read_sandbox_memory(&self, ptr: u32, len: u32) + -> Result, DispatchError> + { + ensure!(len <= self.ext.schedule().limits.max_memory_size(), Error::::OutOfBounds); + let mut buf = vec![0u8; len as usize]; + self.memory.get(ptr, buf.as_mut_slice()) + .map_err(|_| Error::::OutOfBounds)?; + Ok(buf) + } - Ok(()) -} + /// Read designated chunk from the sandbox memory into the supplied buffer. + /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - requested buffer is not within the bounds of the sandbox memory. + pub fn read_sandbox_memory_into_buf(&self, ptr: u32, buf: &mut [u8]) + -> Result<(), DispatchError> + { + self.memory.get(ptr, buf).map_err(|_| Error::::OutOfBounds.into()) + } -/// Supply to `write_sandbox_output` to indicate that the gas meter should not be charged. -/// -/// This is only appropriate when writing out data of constant size that does not depend on user -/// input. In this case the costs for this copy was already charged as part of the token at -/// the beginning of the API entry point. -fn already_charged(_: u32) -> Option { - None -} + /// Read designated chunk from the sandbox memory and attempt to decode into the specified type. + /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - requested buffer is not within the bounds of the sandbox memory. + /// - the buffer contents cannot be decoded as the required type. + /// + /// # Note + /// + /// It is safe to forgo benchmarking and charging weight relative to `len` for fixed + /// size types (basically everything not containing a heap collection): + /// Despite the fact that we are usually about to read the encoding of a fixed size + /// type, we cannot know the encoded size of that type. We therefore are required to + /// use the length provided by the contract. This length is untrusted and therefore + /// we charge weight relative to the provided size upfront that covers the copy costs. + /// On success this cost is refunded as the copying was already covered in the + /// overall cost of the host function. This is different from `read_sandbox_memory` + /// where the size is dynamic and the costs resulting from that dynamic size must + /// be charged relative to this dynamic size anyways (before reading) by constructing + /// the benchmark for that. + pub fn read_sandbox_memory_as(&mut self, ptr: u32, len: u32) + -> Result + { + let amount = self.charge_gas(RuntimeToken::CopyIn(len))?; + let buf = self.read_sandbox_memory(ptr, len)?; + let decoded = D::decode_all(&mut &buf[..]) + .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; + self.gas_meter.refund(amount); + Ok(decoded) + } -/// Stores a DispatchError returned from an Ext function into the trap_reason. -/// -/// This allows through supervisor generated errors to the caller. -fn store_err(ctx: &mut Runtime, err: Error) -> sp_sandbox::HostError where - E: Ext, - Error: Into, -{ - ctx.trap_reason = Some(TrapReason::SupervisorError(err.into())); - sp_sandbox::HostError -} + /// Write the given buffer and its length to the designated locations in sandbox memory and + /// charge gas according to the token returned by `create_token`. + // + /// `out_ptr` is the location in sandbox memory where `buf` should be written to. + /// `out_len_ptr` is an in-out location in sandbox memory. It is read to determine the + /// length of the buffer located at `out_ptr`. If that buffer is large enough the actual + /// `buf.len()` is written to this location. 
+ /// + /// If `out_ptr` is set to the sentinel value of `u32::max_value()` and `allow_skip` is true the + /// operation is skipped and `Ok` is returned. This is supposed to help callers to make copying + /// output optional. For example to skip copying back the output buffer of an `seal_call` + /// when the caller is not interested in the result. + /// + /// `create_token` can optionally instruct this function to charge the gas meter with the token + /// it returns. `create_token` receives the variable amount of bytes that are about to be copied by + /// this function. + /// + /// In addition to the error conditions of `write_sandbox_memory` this functions returns + /// `Err` if the size of the buffer located at `out_ptr` is too small to fit `buf`. + pub fn write_sandbox_output( + &mut self, + out_ptr: u32, + out_len_ptr: u32, + buf: &[u8], + allow_skip: bool, + create_token: impl FnOnce(u32) -> Option, + ) -> Result<(), DispatchError> + { + if allow_skip && out_ptr == u32::max_value() { + return Ok(()); + } -/// Fallible conversion of `DispatchError` to `ReturnCode`. -fn err_into_return_code(from: DispatchError) -> Result { - use ReturnCode::*; - - let below_sub = Error::::BelowSubsistenceThreshold.into(); - let transfer_failed = Error::::TransferFailed.into(); - let not_funded = Error::::NewContractNotFunded.into(); - let no_code = Error::::CodeNotFound.into(); - let invalid_contract = Error::::NotCallable.into(); - - match from { - x if x == below_sub => Ok(BelowSubsistenceThreshold), - x if x == transfer_failed => Ok(TransferFailed), - x if x == not_funded => Ok(NewContractNotFunded), - x if x == no_code => Ok(CodeNotFound), - x if x == invalid_contract => Ok(NotCallable), - err => Err(err) - } -} + let buf_len = buf.len() as u32; + let len: u32 = self.read_sandbox_memory_as(out_len_ptr, 4)?; + + if len < buf_len { + Err(Error::::OutputBufferTooSmall)? + } -/// Fallible conversion of a `ExecResult` to `ReturnCode`. -fn exec_into_return_code(from: ExecResult) -> Result { - use pallet_contracts_primitives::ErrorOrigin::Callee; + if let Some(token) = create_token(buf_len) { + self.charge_gas(token)?; + } - let ExecError { error, origin } = match from { - Ok(retval) => return Ok(retval.into()), - Err(err) => err, - }; + self.memory.set(out_ptr, buf).and_then(|_| { + self.memory.set(out_len_ptr, &buf_len.encode()) + }) + .map_err(|_| Error::::OutOfBounds)?; - match (error, origin) { - (_, Callee) => Ok(ReturnCode::CalleeTrapped), - (err, _) => err_into_return_code::(err) + Ok(()) } -} -/// Used by Runtime API that calls into other contracts. -/// -/// Those need to transform the the `ExecResult` returned from the execution into -/// a `ReturnCode`. If this conversion fails because the `ExecResult` constitutes a -/// a fatal error then this error is stored in the `ExecutionContext` so it can be -/// extracted for display in the UI. -fn map_exec_result(ctx: &mut Runtime, result: ExecResult) - -> Result -{ - match exec_into_return_code::(result) { - Ok(code) => Ok(code), - Err(err) => Err(store_err(ctx, err)), + /// Write the given buffer to the designated location in the sandbox memory. + /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - designated area is not within the bounds of the sandbox memory. + fn write_sandbox_memory(&mut self, ptr: u32, buf: &[u8]) -> Result<(), DispatchError> { + self.memory.set(ptr, buf).map_err(|_| Error::::OutOfBounds.into()) } -} -/// Try to convert an error into a `ReturnCode`. 
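`write_sandbox_output` above skips the copy entirely when the output pointer equals the `u32::max_value()` sentinel (and skipping is allowed), and otherwise checks the caller-declared buffer length before writing back both the data and its actual length. A self-contained sketch of that control flow, modelling sandbox memory as a plain byte slice:

```rust
const SENTINEL: u32 = u32::MAX;

// Write `buf` to `out_ptr` and its length to `out_len_ptr`, mirroring the
// checks done by `write_sandbox_output`.
fn write_output(
    memory: &mut [u8],
    out_ptr: u32,
    out_len_ptr: u32,
    buf: &[u8],
    allow_skip: bool,
) -> Result<(), &'static str> {
    // The contract signalled that it does not care about the output.
    if allow_skip && out_ptr == SENTINEL {
        return Ok(());
    }
    let buf_len = buf.len() as u32;
    let len_pos = out_len_ptr as usize;
    if len_pos + 4 > memory.len() {
        return Err("out of bounds");
    }
    // `out_len_ptr` is in/out: read the space available at `out_ptr` first.
    let available = u32::from_le_bytes([
        memory[len_pos],
        memory[len_pos + 1],
        memory[len_pos + 2],
        memory[len_pos + 3],
    ]);
    if available < buf_len {
        return Err("output buffer too small");
    }
    // Copy the payload, then overwrite the length field with the actual size.
    let out = out_ptr as usize;
    memory
        .get_mut(out..out + buf.len())
        .ok_or("out of bounds")?
        .copy_from_slice(buf);
    memory[len_pos..len_pos + 4].copy_from_slice(&buf_len.to_le_bytes());
    Ok(())
}

fn main() {
    let mut memory = vec![0u8; 16];
    memory[8..12].copy_from_slice(&4u32.to_le_bytes()); // 4 bytes available at offset 0
    write_output(&mut memory, 0, 8, b"abcd", false).unwrap();
    assert_eq!(&memory[0..4], b"abcd");
    // With the sentinel pointer and `allow_skip`, the copy is skipped entirely.
    write_output(&mut memory, SENTINEL, 8, b"ignored", true).unwrap();
}
```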
-/// -/// Used to decide between fatal and non-fatal errors. -fn map_dispatch_result(ctx: &mut Runtime, result: Result) - -> Result -{ - let err = if let Err(err) = result { - err - } else { - return Ok(ReturnCode::Success) - }; - - match err_into_return_code::(err) { - Ok(code) => Ok(code), - Err(err) => Err(store_err(ctx, err)), + /// Computes the given hash function on the supplied input. + /// + /// Reads from the sandboxed input buffer into an intermediate buffer. + /// Returns the result directly to the output buffer of the sandboxed memory. + /// + /// It is the callers responsibility to provide an output buffer that + /// is large enough to hold the expected amount of bytes returned by the + /// chosen hash function. + /// + /// # Note + /// + /// The `input` and `output` buffers may overlap. + fn compute_hash_on_intermediate_buffer( + &mut self, + hash_fn: F, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), DispatchError> + where + F: FnOnce(&[u8]) -> R, + R: AsRef<[u8]>, + { + // Copy input into supervisor memory. + let input = self.read_sandbox_memory(input_ptr, input_len)?; + // Compute the hash on the input buffer using the given hash function. + let hash = hash_fn(&input); + // Write the resulting hash back into the sandboxed output buffer. + self.write_sandbox_memory(output_ptr, hash.as_ref())?; + Ok(()) + } + + /// Fallible conversion of `DispatchError` to `ReturnCode`. + fn err_into_return_code(from: DispatchError) -> Result { + use ReturnCode::*; + + let below_sub = Error::::BelowSubsistenceThreshold.into(); + let transfer_failed = Error::::TransferFailed.into(); + let not_funded = Error::::NewContractNotFunded.into(); + let no_code = Error::::CodeNotFound.into(); + let invalid_contract = Error::::NotCallable.into(); + + match from { + x if x == below_sub => Ok(BelowSubsistenceThreshold), + x if x == transfer_failed => Ok(TransferFailed), + x if x == not_funded => Ok(NewContractNotFunded), + x if x == no_code => Ok(CodeNotFound), + x if x == invalid_contract => Ok(NotCallable), + err => Err(err) + } + } + + /// Fallible conversion of a `ExecResult` to `ReturnCode`. + fn exec_into_return_code(from: ExecResult) -> Result { + use pallet_contracts_primitives::ErrorOrigin::Callee; + + let ExecError { error, origin } = match from { + Ok(retval) => return Ok(retval.into()), + Err(err) => err, + }; + + match (error, origin) { + (_, Callee) => Ok(ReturnCode::CalleeTrapped), + (err, _) => Self::err_into_return_code(err) + } } } @@ -567,7 +620,7 @@ define_env!(Env, , // // - amount: How much gas is used. gas(ctx, amount: u32) => { - charge_gas(ctx, RuntimeToken::MeteringBlock(amount))?; + ctx.charge_gas(RuntimeToken::MeteringBlock(amount))?; Ok(()) }, @@ -587,15 +640,14 @@ define_env!(Env, , // - If value length exceeds the configured maximum value length of a storage entry. // - Upon trying to set an empty storage entry (value length is 0). 
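`err_into_return_code` above compares an opaque dispatch error against a fixed set of module errors and turns the recognized ones into numeric `ReturnCode`s, leaving everything else as a fatal error. A minimal sketch of that "known errors become status codes, unknown errors stay fatal" split, with plain enums standing in for `DispatchError` and `ReturnCode`:

```rust
// Stand-ins for the real DispatchError / ReturnCode types.
#[derive(Debug, PartialEq, Clone, Copy)]
enum DispatchError {
    BelowSubsistenceThreshold,
    TransferFailed,
    CodeNotFound,
    Other,
}

#[derive(Debug, PartialEq)]
enum ReturnCode {
    Success = 0,
    BelowSubsistenceThreshold = 1,
    TransferFailed = 2,
    CodeNotFound = 3,
}

// Recognized errors are reported to the contract as a return code;
// anything else is propagated and eventually traps the contract.
fn err_into_return_code(from: DispatchError) -> Result<ReturnCode, DispatchError> {
    use ReturnCode::*;
    match from {
        DispatchError::BelowSubsistenceThreshold => Ok(BelowSubsistenceThreshold),
        DispatchError::TransferFailed => Ok(TransferFailed),
        DispatchError::CodeNotFound => Ok(CodeNotFound),
        err => Err(err),
    }
}

fn main() {
    assert_eq!(
        err_into_return_code(DispatchError::TransferFailed),
        Ok(ReturnCode::TransferFailed)
    );
    assert!(err_into_return_code(DispatchError::Other).is_err());
    let _ = ReturnCode::Success; // success is produced by the callers, not by this mapping
}
```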
seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { - charge_gas(ctx, RuntimeToken::SetStorage(value_len))?; + ctx.charge_gas(RuntimeToken::SetStorage(value_len))?; if value_len > ctx.ext.max_value_size() { - Err(store_err(ctx, Error::::ValueTooLarge))?; + Err(Error::::ValueTooLarge)?; } let mut key: StorageKey = [0; 32]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; - let value = Some(read_sandbox_memory(ctx, value_ptr, value_len)?); - ctx.ext.set_storage(key, value); - Ok(()) + ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?; + let value = Some(ctx.read_sandbox_memory(value_ptr, value_len)?); + ctx.ext.set_storage(key, value).map_err(Into::into) }, // Clear the value at the given key in the contract storage. @@ -604,11 +656,10 @@ define_env!(Env, , // // - `key_ptr`: pointer into the linear memory where the location to clear the value is placed. seal_clear_storage(ctx, key_ptr: u32) => { - charge_gas(ctx, RuntimeToken::ClearStorage)?; + ctx.charge_gas(RuntimeToken::ClearStorage)?; let mut key: StorageKey = [0; 32]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; - ctx.ext.set_storage(key, None); - Ok(()) + ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?; + ctx.ext.set_storage(key, None).map_err(Into::into) }, // Retrieve the value under the given key from storage. @@ -624,11 +675,11 @@ define_env!(Env, , // // `ReturnCode::KeyNotFound` seal_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::GetStorageBase)?; + ctx.charge_gas(RuntimeToken::GetStorageBase)?; let mut key: StorageKey = [0; 32]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; + ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?; if let Some(value) = ctx.ext.get_storage(&key) { - write_sandbox_output(ctx, out_ptr, out_len_ptr, &value, false, |len| { + ctx.write_sandbox_output(out_ptr, out_len_ptr, &value, false, |len| { Some(RuntimeToken::GetStorageCopyOut(len)) })?; Ok(ReturnCode::Success) @@ -659,14 +710,20 @@ define_env!(Env, , value_ptr: u32, value_len: u32 ) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::Transfer)?; - let callee: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, account_ptr, account_len)?; + ctx.charge_gas(RuntimeToken::Transfer)?; + let callee: <::T as frame_system::Config>::AccountId = + ctx.read_sandbox_memory_as(account_ptr, account_len)?; let value: BalanceOf<::T> = - read_sandbox_memory_as(ctx, value_ptr, value_len)?; + ctx.read_sandbox_memory_as(value_ptr, value_len)?; let result = ctx.ext.transfer(&callee, value); - map_dispatch_result(ctx, result) + match result { + Ok(()) => Ok(ReturnCode::Success), + Err(err) => { + let code = Runtime::::err_into_return_code(err)?; + Ok(code) + } + } }, // Make a call to another contract. 
@@ -712,14 +769,14 @@ define_env!(Env, , output_ptr: u32, output_len_ptr: u32 ) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::CallBase(input_data_len))?; - let callee: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, callee_ptr, callee_len)?; - let value: BalanceOf<::T> = read_sandbox_memory_as(ctx, value_ptr, value_len)?; - let input_data = read_sandbox_memory(ctx, input_data_ptr, input_data_len)?; + ctx.charge_gas(RuntimeToken::CallBase(input_data_len))?; + let callee: <::T as frame_system::Config>::AccountId = + ctx.read_sandbox_memory_as(callee_ptr, callee_len)?; + let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr, value_len)?; + let input_data = ctx.read_sandbox_memory(input_data_ptr, input_data_len)?; if value > 0u32.into() { - charge_gas(ctx, RuntimeToken::CallSurchargeTransfer)?; + ctx.charge_gas(RuntimeToken::CallSurchargeTransfer)?; } let nested_gas_limit = if gas == 0 { @@ -744,11 +801,11 @@ define_env!(Env, , }); if let Ok(output) = &call_outcome { - write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true, |len| { + ctx.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { Some(RuntimeToken::CallCopyOut(len)) })?; } - map_exec_result(ctx, call_outcome) + Ok(Runtime::::exec_into_return_code(call_outcome)?) }, // Instantiate a contract with the specified code hash. @@ -779,6 +836,8 @@ define_env!(Env, , // - output_ptr: a pointer where the output buffer is copied to. // - output_len_ptr: in-out pointer to where the length of the buffer is read from // and the actual length is written to. + // - salt_ptr: Pointer to raw bytes used for address deriviation. See `fn contract_address`. + // - salt_len: length in bytes of the supplied salt. // // # Errors // @@ -806,13 +865,16 @@ define_env!(Env, , address_ptr: u32, address_len_ptr: u32, output_ptr: u32, - output_len_ptr: u32 + output_len_ptr: u32, + salt_ptr: u32, + salt_len: u32 ) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::InstantiateBase(input_data_len))?; + ctx.charge_gas(RuntimeToken::InstantiateBase {input_data_len, salt_len})?; let code_hash: CodeHash<::T> = - read_sandbox_memory_as(ctx, code_hash_ptr, code_hash_len)?; - let value: BalanceOf<::T> = read_sandbox_memory_as(ctx, value_ptr, value_len)?; - let input_data = read_sandbox_memory(ctx, input_data_ptr, input_data_len)?; + ctx.read_sandbox_memory_as(code_hash_ptr, code_hash_len)?; + let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr, value_len)?; + let input_data = ctx.read_sandbox_memory(input_data_ptr, input_data_len)?; + let salt = ctx.read_sandbox_memory(salt_ptr, salt_len)?; let nested_gas_limit = if gas == 0 { ctx.gas_meter.gas_left() @@ -824,10 +886,11 @@ define_env!(Env, , match nested_meter { Some(nested_meter) => { ext.instantiate( - &code_hash, + code_hash, value, nested_meter, - input_data + input_data, + &salt, ) } // there is not enough gas to allocate for the nested call. 
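In the `seal_call` hunk above, a `gas` argument of `0` means "forward all remaining gas", while any other value caps the nested gas meter. A small sketch of that selection; the `with_nested` model below (a nested meter is only created when the requested amount is actually available) is an assumption for illustration, not taken verbatim from the pallet:

```rust
// A `gas` argument of 0 forwards everything that is left; otherwise the
// caller-supplied limit is used for the nested call.
fn nested_gas_limit(gas: u64, gas_left: u64) -> u64 {
    if gas == 0 { gas_left } else { gas }
}

// Toy model of `with_nested`: the nested meter can only be created when the
// requested amount can actually be set aside from the current meter.
fn with_nested(gas_left: u64, limit: u64) -> Option<u64> {
    if limit <= gas_left { Some(limit) } else { None }
}

fn main() {
    let left = 1_000u64;
    assert_eq!(nested_gas_limit(0, left), 1_000); // forward all remaining gas
    assert_eq!(nested_gas_limit(300, left), 300);
    assert_eq!(with_nested(left, 300), Some(300));
    assert_eq!(with_nested(left, 5_000), None); // not enough gas for the nested call
}
```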
@@ -836,15 +899,15 @@ define_env!(Env, , }); if let Ok((address, output)) = &instantiate_outcome { if !output.flags.contains(ReturnFlags::REVERT) { - write_sandbox_output( - ctx, address_ptr, address_len_ptr, &address.encode(), true, already_charged, + ctx.write_sandbox_output( + address_ptr, address_len_ptr, &address.encode(), true, already_charged, )?; } - write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true, |len| { + ctx.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { Some(RuntimeToken::InstantiateCopyOut(len)) })?; } - map_exec_result(ctx, instantiate_outcome.map(|(_id, retval)| retval)) + Ok(Runtime::::exec_into_return_code(instantiate_outcome.map(|(_id, retval)| retval))?) }, // Remove the calling account and transfer remaining balance. @@ -861,29 +924,40 @@ define_env!(Env, , // # Traps // // - The contract is live i.e is already on the call stack. + // - Failed to send the balance to the beneficiary. + // - The deletion queue is full. seal_terminate( ctx, beneficiary_ptr: u32, beneficiary_len: u32 ) => { - charge_gas(ctx, RuntimeToken::Terminate)?; - let beneficiary: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, beneficiary_ptr, beneficiary_len)?; + ctx.charge_gas(RuntimeToken::Terminate)?; + let beneficiary: <::T as frame_system::Config>::AccountId = + ctx.read_sandbox_memory_as(beneficiary_ptr, beneficiary_len)?; - if let Ok(_) = ctx.ext.terminate(&beneficiary) { - ctx.trap_reason = Some(TrapReason::Termination); - } - Err(sp_sandbox::HostError) + ctx.ext.terminate(&beneficiary)?; + Err(TrapReason::Termination) }, - seal_input(ctx, buf_ptr: u32, buf_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::InputBase)?; + // Stores the input passed by the caller into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // # Note + // + // This function can only be called once. Calling it multiple times will trigger a trap. + seal_input(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeToken::InputBase)?; if let Some(input) = ctx.input_data.take() { - write_sandbox_output(ctx, buf_ptr, buf_len_ptr, &input, false, |len| { + ctx.write_sandbox_output(out_ptr, out_len_ptr, &input, false, |len| { Some(RuntimeToken::InputCopyOut(len)) - }) + })?; + Ok(()) } else { - Err(sp_sandbox::HostError) + Err(Error::::InputAlreadyRead.into()) } }, @@ -905,16 +979,11 @@ define_env!(Env, , // // Using a reserved bit triggers a trap. seal_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => { - charge_gas(ctx, RuntimeToken::Return(data_len))?; - ctx.trap_reason = Some(TrapReason::Return(ReturnData { + ctx.charge_gas(RuntimeToken::Return(data_len))?; + Err(TrapReason::Return(ReturnData { flags, - data: read_sandbox_memory(ctx, data_ptr, data_len)?, - })); - - // The trap mechanism is used to immediately terminate the execution. - // This trap should be handled appropriately before returning the result - // to the user of this crate. - Err(sp_sandbox::HostError) + data: ctx.read_sandbox_memory(data_ptr, data_len)?, + })) }, // Stores the address of the caller into the supplied buffer. @@ -928,10 +997,10 @@ define_env!(Env, , // extrinsic will be returned. 
Otherwise, if this call is initiated by another contract then the // address of the contract will be returned. The value is encoded as T::AccountId. seal_caller(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Caller)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false, already_charged - ) + ctx.charge_gas(RuntimeToken::Caller)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false, already_charged + )?) }, // Stores the address of the current contract into the supplied buffer. @@ -941,10 +1010,10 @@ define_env!(Env, , // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. seal_address(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Address)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.address().encode(), false, already_charged - ) + ctx.charge_gas(RuntimeToken::Address)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.address().encode(), false, already_charged + )?) }, // Stores the price for the specified amount of gas into the supplied buffer. @@ -961,11 +1030,10 @@ define_env!(Env, , // It is recommended to avoid specifying very small values for `gas` as the prices for a single // gas can be smaller than one. seal_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::WeightToFee)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false, - already_charged - ) + ctx.charge_gas(RuntimeToken::WeightToFee)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false, already_charged + )?) }, // Stores the amount of gas left into the supplied buffer. @@ -977,10 +1045,10 @@ define_env!(Env, , // // The data is encoded as Gas. seal_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::GasLeft)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.gas_meter.gas_left().encode(), false, already_charged - ) + ctx.charge_gas(RuntimeToken::GasLeft)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.gas_meter.gas_left().encode(), false, already_charged + )?) }, // Stores the balance of the current account into the supplied buffer. @@ -992,10 +1060,10 @@ define_env!(Env, , // // The data is encoded as T::Balance. seal_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Balance)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false, already_charged - ) + ctx.charge_gas(RuntimeToken::Balance)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false, already_charged + )?) }, // Stores the value transferred along with this call or as endowment into the supplied buffer. @@ -1007,11 +1075,10 @@ define_env!(Env, , // // The data is encoded as T::Balance. seal_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::ValueTransferred)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false, - already_charged - ) + ctx.charge_gas(RuntimeToken::ValueTransferred)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false, already_charged + )?) }, // Stores a random number for the current block and the given subject into the supplied buffer. 
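The `seal_return` and `seal_terminate` hunks above replace manual `ctx.trap_reason = Some(...)` assignments with returning `Err(TrapReason)`, so a single place records the reason and converts it into a wasm trap. A toy model of that flow; the dispatcher here is invented for the sketch (in the pallet it is generated by the `define_env!` macro):

```rust
// Stand-in for the pallet's TrapReason: why a host function ended execution early.
#[allow(dead_code)]
#[derive(Debug, PartialEq)]
enum TrapReason {
    Return { flags: u32, data: Vec<u8> },
    Termination,
}

struct Runtime {
    trap_reason: Option<TrapReason>,
}

impl Runtime {
    // Host functions signal early termination by returning `Err(TrapReason)`.
    fn seal_return(&self, flags: u32, data: Vec<u8>) -> Result<(), TrapReason> {
        Err(TrapReason::Return { flags, data })
    }

    // Hypothetical dispatcher: store the reason and surface a plain trap to the sandbox.
    fn dispatch<F>(&mut self, f: F) -> Result<(), &'static str>
    where
        F: FnOnce(&Self) -> Result<(), TrapReason>,
    {
        match f(self) {
            Ok(()) => Ok(()),
            Err(reason) => {
                self.trap_reason = Some(reason);
                Err("trap") // the reason is inspected after execution stops
            }
        }
    }
}

fn main() {
    let mut rt = Runtime { trap_reason: None };
    let _ = rt.dispatch(|r| r.seal_return(0, b"result".to_vec()));
    assert_eq!(
        rt.trap_reason,
        Some(TrapReason::Return { flags: 0, data: b"result".to_vec() })
    );
}
```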
@@ -1023,16 +1090,14 @@ define_env!(Env, , // // The data is encoded as T::Hash. seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Random)?; - // The length of a subject can't exceed `max_subject_len`. - if subject_len > ctx.schedule.max_subject_len { - return Err(sp_sandbox::HostError); + ctx.charge_gas(RuntimeToken::Random)?; + if subject_len > ctx.ext.schedule().limits.subject_len { + Err(Error::::RandomSubjectTooLong)?; } - let subject_buf = read_sandbox_memory(ctx, subject_ptr, subject_len)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).encode(), false, - already_charged - ) + let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).encode(), false, already_charged + )?) }, // Load the latest block timestamp into the supplied buffer @@ -1042,20 +1107,20 @@ define_env!(Env, , // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. seal_now(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Now)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.now().encode(), false, already_charged - ) + ctx.charge_gas(RuntimeToken::Now)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.now().encode(), false, already_charged + )?) }, // Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. // // The data is encoded as T::Balance. seal_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::MinimumBalance)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false, already_charged - ) + ctx.charge_gas(RuntimeToken::MinimumBalance)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false, already_charged + )?) }, // Stores the tombstone deposit into the supplied buffer. @@ -1074,11 +1139,10 @@ define_env!(Env, , // below the sum of existential deposit and the tombstone deposit. The sum // is commonly referred as subsistence threshold in code. seal_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::TombstoneDeposit)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.tombstone_deposit().encode(), false, - already_charged - ) + ctx.charge_gas(RuntimeToken::TombstoneDeposit)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.tombstone_deposit().encode(), false, already_charged + )?) }, // Try to restore the given destination contract sacrificing the caller. @@ -1088,25 +1152,30 @@ define_env!(Env, , // the caller contract and restore the destination contract and set the specified `rent_allowance`. // All caller's funds are transfered to the destination. // - // If there is no tombstone at the destination address, the hashes don't match or this contract - // instance is already present on the contract call stack, a trap is generated. + // The tombstone hash is derived as `hash(code_hash, storage_root_hash)`. In order to match + // this hash to its own hash the restorer must make its storage equal to the one of the + // evicted destination contract. In order to allow for additional storage items in the + // restoring contract a delta can be specified to this function. 
All keys specified as + // delta are disregarded when calculating the storage root hash. // - // Otherwise, the destination contract is restored. This function is diverging and stops execution - // even on success. + // On success, the destination contract is restored. This function is diverging and + // stops execution even on success. // - // `dest_ptr`, `dest_len` - the pointer and the length of a buffer that encodes `T::AccountId` - // with the address of the to be restored contract. - // `code_hash_ptr`, `code_hash_len` - the pointer and the length of a buffer that encodes - // a code hash of the to be restored contract. - // `rent_allowance_ptr`, `rent_allowance_len` - the pointer and the length of a buffer that - // encodes the rent allowance that must be set in the case of successful restoration. - // `delta_ptr` is the pointer to the start of a buffer that has `delta_count` storage keys - // laid out sequentially. + // - `dest_ptr`, `dest_len` - the pointer and the length of a buffer that encodes `T::AccountId` + // with the address of the to be restored contract. + // - `code_hash_ptr`, `code_hash_len` - the pointer and the length of a buffer that encodes + // a code hash of the to be restored contract. + // - `rent_allowance_ptr`, `rent_allowance_len` - the pointer and the length of a buffer that + // encodes the rent allowance that must be set in the case of successful restoration. + // - `delta_ptr` is the pointer to the start of a buffer that has `delta_count` storage keys + // laid out sequentially. // // # Traps // - // - Tombstone hashes do not match - // - Calling cantract is live i.e is already on the call stack. + // - There is no tombstone at the destination address. + // - Tombstone hashes do not match. + // - The calling contract is already present on the call stack. + // - The supplied code_hash does not exist on-chain. seal_restore_to( ctx, dest_ptr: u32, @@ -1118,46 +1187,45 @@ define_env!(Env, , delta_ptr: u32, delta_count: u32 ) => { - charge_gas(ctx, RuntimeToken::RestoreTo(delta_count))?; - let dest: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, dest_ptr, dest_len)?; + ctx.charge_gas(RuntimeToken::RestoreTo(delta_count))?; + let dest: <::T as frame_system::Config>::AccountId = + ctx.read_sandbox_memory_as(dest_ptr, dest_len)?; let code_hash: CodeHash<::T> = - read_sandbox_memory_as(ctx, code_hash_ptr, code_hash_len)?; + ctx.read_sandbox_memory_as(code_hash_ptr, code_hash_len)?; let rent_allowance: BalanceOf<::T> = - read_sandbox_memory_as(ctx, rent_allowance_ptr, rent_allowance_len)?; + ctx.read_sandbox_memory_as(rent_allowance_ptr, rent_allowance_len)?; let delta = { + const KEY_SIZE: usize = 32; + // We can eagerly allocate because we charged for the complete delta count already - let mut delta = Vec::with_capacity(delta_count as usize); + // We still need to make sure that the allocation isn't larger than the memory + // allocator can handle. + ensure!( + delta_count + .saturating_mul(KEY_SIZE as u32) <= ctx.ext.schedule().limits.max_memory_size(), + Error::::OutOfBounds, + ); + let mut delta = vec![[0; KEY_SIZE]; delta_count as usize]; let mut key_ptr = delta_ptr; - for _ in 0..delta_count { - const KEY_SIZE: usize = 32; - - // Read the delta into the provided buffer and collect it into the buffer. 
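The delta handling in `seal_restore_to` above eagerly allocates the key buffer but first checks `delta_count * KEY_SIZE` against the memory limit using saturating multiplication, so a hostile count cannot trigger an oversized allocation or an overflowing check. A standalone sketch of that guard:

```rust
const KEY_SIZE: usize = 32;

// Allocate space for `delta_count` storage keys, but only after verifying that the
// total size stays within `max_memory_size`; the multiplication saturates so an
// attacker-controlled count cannot overflow the comparison.
fn alloc_delta(delta_count: u32, max_memory_size: u32) -> Result<Vec<[u8; KEY_SIZE]>, &'static str> {
    if delta_count.saturating_mul(KEY_SIZE as u32) > max_memory_size {
        return Err("out of bounds");
    }
    Ok(vec![[0u8; KEY_SIZE]; delta_count as usize])
}

fn main() {
    assert_eq!(alloc_delta(4, 1024).map(|v| v.len()), Ok(4));
    assert!(alloc_delta(u32::MAX, 1024).is_err()); // saturates instead of overflowing
}
```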
- let mut delta_key: StorageKey = [0; KEY_SIZE]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut delta_key)?; - delta.push(delta_key); + for i in 0..delta_count { + // Read the delta into the provided buffer + // This cannot panic because of the loop condition + ctx.read_sandbox_memory_into_buf(key_ptr, &mut delta[i as usize])?; // Offset key_ptr to the next element. - key_ptr = key_ptr.checked_add(KEY_SIZE as u32).ok_or_else(|| sp_sandbox::HostError)?; + key_ptr = key_ptr.checked_add(KEY_SIZE as u32).ok_or(Error::::OutOfBounds)?; } delta }; - if let Ok(()) = ctx.ext.restore_to( - dest, - code_hash, - rent_allowance, - delta, - ) { - ctx.trap_reason = Some(TrapReason::Restoration); - } - Err(sp_sandbox::HostError) + ctx.ext.restore_to(dest, code_hash, rent_allowance, delta)?; + Err(TrapReason::Restoration) }, // Deposit a contract event with the data buffer and optional list of topics. There is a limit - // on the maximum number of topics specified by `max_event_topics`. + // on the maximum number of topics specified by `event_topics`. // // - topics_ptr - a pointer to the buffer of topics encoded as `Vec`. The value of this // is ignored if `topics_len` is set to 0. The topics list can't contain duplicates. @@ -1167,31 +1235,31 @@ define_env!(Env, , seal_deposit_event(ctx, topics_ptr: u32, topics_len: u32, data_ptr: u32, data_len: u32) => { let num_topic = topics_len .checked_div(sp_std::mem::size_of::>() as u32) - .ok_or_else(|| store_err(ctx, "Zero sized topics are not allowed"))?; - charge_gas(ctx, RuntimeToken::DepositEvent { + .ok_or_else(|| "Zero sized topics are not allowed")?; + ctx.charge_gas(RuntimeToken::DepositEvent { num_topic, len: data_len, })?; if data_len > ctx.ext.max_value_size() { - Err(store_err(ctx, Error::::ValueTooLarge))?; + Err(Error::::ValueTooLarge)?; } let mut topics: Vec::::T>> = match topics_len { 0 => Vec::new(), - _ => read_sandbox_memory_as(ctx, topics_ptr, topics_len)?, + _ => ctx.read_sandbox_memory_as(topics_ptr, topics_len)?, }; - // If there are more than `max_event_topics`, then trap. - if topics.len() > ctx.schedule.max_event_topics as usize { - return Err(sp_sandbox::HostError); + // If there are more than `event_topics`, then trap. + if topics.len() > ctx.ext.schedule().limits.event_topics as usize { + Err(Error::::TooManyTopics)?; } // Check for duplicate topics. If there are any, then trap. if has_duplicates(&mut topics) { - return Err(sp_sandbox::HostError); + Err(Error::::DuplicateTopics)?; } - let event_data = read_sandbox_memory(ctx, data_ptr, data_len)?; + let event_data = ctx.read_sandbox_memory(data_ptr, data_len)?; ctx.ext.deposit_event(topics, event_data); @@ -1204,9 +1272,9 @@ define_env!(Env, , // Should be decodable as a `T::Balance`. Traps otherwise. // - value_len: length of the value buffer. seal_set_rent_allowance(ctx, value_ptr: u32, value_len: u32) => { - charge_gas(ctx, RuntimeToken::SetRentAllowance)?; + ctx.charge_gas(RuntimeToken::SetRentAllowance)?; let value: BalanceOf<::T> = - read_sandbox_memory_as(ctx, value_ptr, value_len)?; + ctx.read_sandbox_memory_as(value_ptr, value_len)?; ctx.ext.set_rent_allowance(value); Ok(()) @@ -1221,17 +1289,17 @@ define_env!(Env, , // // The data is encoded as T::Balance. 
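`seal_deposit_event` above rejects duplicate topics with a `has_duplicates` helper (the old free-function version is removed further down): sort the items, then look for equal neighbours. A pure-std sketch of that O(n log n) check, using an `Ord` bound instead of the original `AsRef<[u8]>` comparison:

```rust
// Detect duplicates by sorting and comparing neighbours.
// O(n log n), no extra allocation; the order of `items` is not preserved.
fn has_duplicates<T: Ord>(items: &mut Vec<T>) -> bool {
    items.sort();
    items.windows(2).any(|w| w[0] == w[1])
}

fn main() {
    let mut topics = vec![[3u8; 32], [1u8; 32], [3u8; 32]];
    assert!(has_duplicates(&mut topics));

    let mut unique = vec![[1u8; 32], [2u8; 32]];
    assert!(!has_duplicates(&mut unique));
}
```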
seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::RentAllowance)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.rent_allowance().encode(), false, already_charged - ) + ctx.charge_gas(RuntimeToken::RentAllowance)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.rent_allowance().encode(), false, already_charged + )?) }, // Prints utf8 encoded string from the data buffer. // Only available on `--dev` chains. // This function may be removed at any time, superseded by a more general contract debugging feature. seal_println(ctx, str_ptr: u32, str_len: u32) => { - let data = read_sandbox_memory(ctx, str_ptr, str_len)?; + let data = ctx.read_sandbox_memory(str_ptr, str_len)?; if let Ok(utf8) = core::str::from_utf8(&data) { sp_runtime::print(utf8); } @@ -1245,10 +1313,10 @@ define_env!(Env, , // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. seal_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::BlockNumber)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false, already_charged - ) + ctx.charge_gas(RuntimeToken::BlockNumber)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false, already_charged + )?) }, // Computes the SHA2 256-bit hash on the given input buffer. @@ -1272,8 +1340,8 @@ define_env!(Env, , // data is placed. The function will write the result // directly into this buffer. seal_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashSha256(input_len))?; - compute_hash_on_intermediate_buffer(ctx, sha2_256, input_ptr, input_len, output_ptr) + ctx.charge_gas(RuntimeToken::HashSha256(input_len))?; + Ok(ctx.compute_hash_on_intermediate_buffer(sha2_256, input_ptr, input_len, output_ptr)?) }, // Computes the KECCAK 256-bit hash on the given input buffer. @@ -1297,8 +1365,8 @@ define_env!(Env, , // data is placed. The function will write the result // directly into this buffer. seal_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashKeccak256(input_len))?; - compute_hash_on_intermediate_buffer(ctx, keccak_256, input_ptr, input_len, output_ptr) + ctx.charge_gas(RuntimeToken::HashKeccak256(input_len))?; + Ok(ctx.compute_hash_on_intermediate_buffer(keccak_256, input_ptr, input_len, output_ptr)?) }, // Computes the BLAKE2 256-bit hash on the given input buffer. @@ -1322,8 +1390,8 @@ define_env!(Env, , // data is placed. The function will write the result // directly into this buffer. seal_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashBlake256(input_len))?; - compute_hash_on_intermediate_buffer(ctx, blake2_256, input_ptr, input_len, output_ptr) + ctx.charge_gas(RuntimeToken::HashBlake256(input_len))?; + Ok(ctx.compute_hash_on_intermediate_buffer(blake2_256, input_ptr, input_len, output_ptr)?) }, // Computes the BLAKE2 128-bit hash on the given input buffer. @@ -1347,62 +1415,40 @@ define_env!(Env, , // data is placed. The function will write the result // directly into this buffer. 
seal_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashBlake128(input_len))?; - compute_hash_on_intermediate_buffer(ctx, blake2_128, input_ptr, input_len, output_ptr) + ctx.charge_gas(RuntimeToken::HashBlake128(input_len))?; + Ok(ctx.compute_hash_on_intermediate_buffer(blake2_128, input_ptr, input_len, output_ptr)?) }, -); -/// Computes the given hash function on the supplied input. -/// -/// Reads from the sandboxed input buffer into an intermediate buffer. -/// Returns the result directly to the output buffer of the sandboxed memory. -/// -/// It is the callers responsibility to provide an output buffer that -/// is large enough to hold the expected amount of bytes returned by the -/// chosen hash function. -/// -/// # Note -/// -/// The `input` and `output` buffers may overlap. -fn compute_hash_on_intermediate_buffer( - ctx: &mut Runtime, - hash_fn: F, - input_ptr: u32, - input_len: u32, - output_ptr: u32, -) -> Result<(), sp_sandbox::HostError> -where - E: Ext, - F: FnOnce(&[u8]) -> R, - R: AsRef<[u8]>, -{ - // Copy input into supervisor memory. - let input = read_sandbox_memory(ctx, input_ptr, input_len)?; - // Compute the hash on the input buffer using the given hash function. - let hash = hash_fn(&input); - // Write the resulting hash back into the sandboxed output buffer. - write_sandbox_memory( + // Call into the chain extension provided by the chain if any. + // + // Handling of the input values is up to the specific chain extension and so is the + // return value. The extension can decide to use the inputs as primitive inputs or as + // in/out arguments by interpreting them as pointers. Any caller of this function + // must therefore coordinate with the chain that it targets. + // + // # Note + // + // If no chain extension exists the contract will trap with the `NoChainExtension` + // module error. + seal_call_chain_extension( ctx, - output_ptr, - hash.as_ref(), - )?; - Ok(()) -} - -/// Finds duplicates in a given vector. -/// -/// This function has complexity of O(n log n) and no additional memory is required, although -/// the order of items is not preserved. -fn has_duplicates>(items: &mut Vec) -> bool { - // Sort the vector - items.sort_by(|a, b| { - Ord::cmp(a.as_ref(), b.as_ref()) - }); - // And then find any two consecutive equal elements. - items.windows(2).any(|w| { - match w { - &[ref a, ref b] => a == b, - _ => false, + func_id: u32, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + output_len_ptr: u32 + ) -> u32 => { + use crate::chain_extension::{ChainExtension, Environment, RetVal}; + if ::ChainExtension::enabled() == false { + Err(Error::::NoChainExtension)?; } - }) -} + let env = Environment::new(ctx, input_ptr, input_len, output_ptr, output_len_ptr); + match ::ChainExtension::call(func_id, env)? { + RetVal::Converging(val) => Ok(val), + RetVal::Diverging{flags, data} => Err(TrapReason::Return(ReturnData { + flags: flags.bits(), + data, + })), + } + }, +); diff --git a/frame/contracts/src/weight_info.rs b/frame/contracts/src/weight_info.rs deleted file mode 100644 index 3a0881ed78d9a..0000000000000 --- a/frame/contracts/src/weight_info.rs +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. 
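The new `seal_call_chain_extension` host function above forwards `func_id` plus raw in/out pointers to a chain-provided extension, which either converges (a `u32` status handed back to the contract) or diverges (execution ends as if the contract had returned). A toy dispatcher illustrating that `Converging`/`Diverging` split; the types and the example extension here are invented for the sketch and are not the pallet's `ChainExtension` trait:

```rust
// Invented stand-ins for the chain extension return types.
enum RetVal {
    Converging(u32),
    Diverging { flags: u32, data: Vec<u8> },
}

#[derive(Debug, PartialEq)]
enum Outcome {
    Status(u32),
    Return { flags: u32, data: Vec<u8> },
}

// A hypothetical extension: func_id 1 echoes the input back and ends execution,
// anything else just reports a status code.
fn call_extension(func_id: u32, input: &[u8]) -> Result<RetVal, &'static str> {
    match func_id {
        1 => Ok(RetVal::Diverging { flags: 0, data: input.to_vec() }),
        _ => Ok(RetVal::Converging(42)),
    }
}

// Mirrors the shape of the host function: converging results become the u32
// returned to the wasm caller, diverging ones end execution with return data.
fn seal_call_chain_extension(func_id: u32, input: &[u8]) -> Result<Outcome, &'static str> {
    Ok(match call_extension(func_id, input)? {
        RetVal::Converging(val) => Outcome::Status(val),
        RetVal::Diverging { flags, data } => Outcome::Return { flags, data },
    })
}

fn main() {
    assert_eq!(seal_call_chain_extension(7, b"in").unwrap(), Outcome::Status(42));
    assert_eq!(
        seal_call_chain_extension(1, b"in").unwrap(),
        Outcome::Return { flags: 0, data: b"in".to_vec() }
    );
}
```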
- -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! This module contains the `WeightInfo` trait and its unsafe implementation on `()`. - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -/// Should be implemented by automatically generated code of the benchmarking system for -/// every runtime that makes use of this pallet. -/// This trait is also implemented on `()`. The implemention on `()` is **unsafe** and must -/// only be used during development. Proper weights can be generated by running the -/// pallet_contracts benchmark suite for the runtime in question. -pub trait WeightInfo { - fn update_schedule() -> Weight; - fn put_code(n: u32, ) -> Weight; - fn instantiate(n: u32, ) -> Weight; - fn call() -> Weight; - fn claim_surcharge() -> Weight; - fn seal_caller(r: u32, ) -> Weight; - fn seal_address(r: u32, ) -> Weight; - fn seal_gas_left(r: u32, ) -> Weight; - fn seal_balance(r: u32, ) -> Weight; - fn seal_value_transferred(r: u32, ) -> Weight; - fn seal_minimum_balance(r: u32, ) -> Weight; - fn seal_tombstone_deposit(r: u32, ) -> Weight; - fn seal_rent_allowance(r: u32, ) -> Weight; - fn seal_block_number(r: u32, ) -> Weight; - fn seal_now(r: u32, ) -> Weight; - fn seal_weight_to_fee(r: u32, ) -> Weight; - fn seal_gas(r: u32, ) -> Weight; - fn seal_input(r: u32, ) -> Weight; - fn seal_input_per_kb(n: u32, ) -> Weight; - fn seal_return(r: u32, ) -> Weight; - fn seal_return_per_kb(n: u32, ) -> Weight; - fn seal_terminate(r: u32, ) -> Weight; - fn seal_restore_to(r: u32, ) -> Weight; - fn seal_restore_to_per_delta(d: u32, ) -> Weight; - fn seal_random(r: u32, ) -> Weight; - fn seal_deposit_event(r: u32, ) -> Weight; - fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight; - fn seal_set_rent_allowance(r: u32, ) -> Weight; - fn seal_set_storage(r: u32, ) -> Weight; - fn seal_set_storage_per_kb(n: u32, ) -> Weight; - fn seal_clear_storage(r: u32, ) -> Weight; - fn seal_get_storage(r: u32, ) -> Weight; - fn seal_get_storage_per_kb(n: u32, ) -> Weight; - fn seal_transfer(r: u32, ) -> Weight; - fn seal_call(r: u32, ) -> Weight; - fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight; - fn seal_instantiate(r: u32, ) -> Weight; - fn seal_instantiate_per_input_output_kb(i: u32, o: u32, ) -> Weight; - fn seal_hash_sha2_256(r: u32, ) -> Weight; - fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight; - fn seal_hash_keccak_256(r: u32, ) -> Weight; - fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight; - fn seal_hash_blake2_256(r: u32, ) -> Weight; - fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight; - fn seal_hash_blake2_128(r: u32, ) -> Weight; - fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight; -} - -/// Unsafe implementation that must only be used for development. 
-impl WeightInfo for () { - fn update_schedule() -> Weight { - (45000000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn put_code(n: u32, ) -> Weight { - (263409000 as Weight) - .saturating_add((169269000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn instantiate(n: u32, ) -> Weight { - (309311000 as Weight) - .saturating_add((1018000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(7 as Weight)) - .saturating_add(DbWeight::get().writes(4 as Weight)) - } - fn call() -> Weight { - (291000000 as Weight) - .saturating_add(DbWeight::get().reads(6 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn claim_surcharge() -> Weight { - (766000000 as Weight) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn seal_caller(r: u32, ) -> Weight { - (182241000 as Weight) - .saturating_add((697428000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_address(r: u32, ) -> Weight { - (193846000 as Weight) - .saturating_add((695989000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_gas_left(r: u32, ) -> Weight { - (166031000 as Weight) - .saturating_add((702533000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_balance(r: u32, ) -> Weight { - (251892000 as Weight) - .saturating_add((1392900000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - } - fn seal_value_transferred(r: u32, ) -> Weight { - (178472000 as Weight) - .saturating_add((694921000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_minimum_balance(r: u32, ) -> Weight { - (191301000 as Weight) - .saturating_add((697871000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_tombstone_deposit(r: u32, ) -> Weight { - (241315000 as Weight) - .saturating_add((686403000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_rent_allowance(r: u32, ) -> Weight { - (104958000 as Weight) - .saturating_add((1459573000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_block_number(r: u32, ) -> Weight { - (174140000 as Weight) - .saturating_add((698152000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_now(r: u32, ) -> Weight { - (203157000 as Weight) - .saturating_add((713595000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_weight_to_fee(r: u32, ) -> Weight { - (178413000 as Weight) - .saturating_add((1071275000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - } - fn seal_gas(r: u32, ) -> Weight { - (171395000 as Weight) - .saturating_add((371653000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_input(r: u32, ) -> Weight { - (184462000 as Weight) - .saturating_add((10538000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn 
seal_input_per_kb(n: u32, ) -> Weight { - (194668000 as Weight) - .saturating_add((301000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_return(r: u32, ) -> Weight { - (175538000 as Weight) - .saturating_add((7462000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_return_per_kb(n: u32, ) -> Weight { - (189759000 as Weight) - .saturating_add((754000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_terminate(r: u32, ) -> Weight { - (184385000 as Weight) - .saturating_add((542615000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) - } - fn seal_restore_to(r: u32, ) -> Weight { - (380385000 as Weight) - .saturating_add((160308000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes((4 as Weight).saturating_mul(r as Weight))) - } - fn seal_restore_to_per_delta(d: u32, ) -> Weight { - (0 as Weight) - .saturating_add((4786197000 as Weight).saturating_mul(d as Weight)) - .saturating_add(DbWeight::get().reads(7 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) - .saturating_add(DbWeight::get().writes(5 as Weight)) - .saturating_add(DbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) - } - fn seal_random(r: u32, ) -> Weight { - (187944000 as Weight) - .saturating_add((1592530000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - } - fn seal_deposit_event(r: u32, ) -> Weight { - (126517000 as Weight) - .saturating_add((2346945000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (2953428000 as Weight) - .saturating_add((1117651000 as Weight).saturating_mul(t as Weight)) - .saturating_add((299890000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) - .saturating_add(DbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) - } - fn seal_set_rent_allowance(r: u32, ) -> Weight { - (142094000 as Weight) - .saturating_add((1726665000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn seal_set_storage(r: u32, ) -> Weight { - (4091409000 as Weight) - .saturating_add((26440116000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes(1 as Weight)) - .saturating_add(DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (3683270000 as Weight) - .saturating_add((233826000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn seal_clear_storage(r: u32, ) -> 
Weight { - (0 as Weight) - .saturating_add((7152747000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes(1 as Weight)) - .saturating_add(DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_get_storage(r: u32, ) -> Weight { - (19007000 as Weight) - .saturating_add((1774675000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (1477332000 as Weight) - .saturating_add((176601000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - } - fn seal_transfer(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((10274385000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes(1 as Weight)) - .saturating_add(DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_call(r: u32, ) -> Weight { - (241916000 as Weight) - .saturating_add((14633108000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (15664107000 as Weight) - .saturating_add((8529984000 as Weight).saturating_mul(t as Weight)) - .saturating_add((52860000 as Weight).saturating_mul(i as Weight)) - .saturating_add((81175000 as Weight).saturating_mul(o as Weight)) - .saturating_add(DbWeight::get().reads(105 as Weight)) - .saturating_add(DbWeight::get().reads((101 as Weight).saturating_mul(t as Weight))) - .saturating_add(DbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) - } - fn seal_instantiate(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((32247550000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes(1 as Weight)) - .saturating_add(DbWeight::get().writes((200 as Weight).saturating_mul(r as Weight))) - } - fn seal_instantiate_per_input_output_kb(i: u32, o: u32, ) -> Weight { - (34376003000 as Weight) - .saturating_add((151350000 as Weight).saturating_mul(i as Weight)) - .saturating_add((82364000 as Weight).saturating_mul(o as Weight)) - .saturating_add(DbWeight::get().reads(207 as Weight)) - .saturating_add(DbWeight::get().writes(202 as Weight)) - } - fn seal_hash_sha2_256(r: u32, ) -> Weight { - (164203000 as Weight) - .saturating_add((565206000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (0 as Weight) - .saturating_add((330063000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_keccak_256(r: u32, ) -> Weight { - (219038000 as Weight) - .saturating_add((567992000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (434654000 as Weight) - 
.saturating_add((271134000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_256(r: u32, ) -> Weight { - (116374000 as Weight) - .saturating_add((566612000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (756028000 as Weight) - .saturating_add((150363000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_128(r: u32, ) -> Weight { - (150126000 as Weight) - .saturating_add((564827000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (1021689000 as Weight) - .saturating_add((149452000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } -} diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs new file mode 100644 index 0000000000000..9c53611038739 --- /dev/null +++ b/frame/contracts/src/weights.rs @@ -0,0 +1,1332 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_contracts +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.1 +//! DATE: 2021-02-04, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_contracts +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/contracts/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_contracts. 
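Both the removed `weight_info.rs` and the new autogenerated `weights.rs` express each weight as a benchmarked base term plus per-unit components and database access costs, combined with saturating arithmetic. A tiny standalone sketch of that composition, reusing the figures from the removed `seal_input_per_kb` implementation and a made-up flat DB read cost in place of the runtime's configured `DbWeight`:

```rust
type Weight = u64;

// Made-up per-access DB cost; in the runtime this comes from the configured DbWeight.
const READ_WEIGHT: Weight = 25_000_000;

fn db_reads(n: Weight) -> Weight {
    READ_WEIGHT.saturating_mul(n)
}

// Same shape as the WeightInfo functions: a benchmarked base term, a per-unit
// term scaled by the input size, and the DB accesses, combined with saturating
// arithmetic so a large `n` cannot overflow the computation.
// Constants taken from the removed `()` implementation of `seal_input_per_kb`.
fn seal_input_per_kb(n: u32) -> Weight {
    (194668000 as Weight)
        .saturating_add((301000 as Weight).saturating_mul(n as Weight))
        .saturating_add(db_reads(4))
}

fn main() {
    println!("weight for 4 KiB of input: {}", seal_input_per_kb(4));
    assert!(seal_input_per_kb(8) > seal_input_per_kb(1));
}
```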
+pub trait WeightInfo { + fn on_initialize() -> Weight; + fn on_initialize_per_trie_key(k: u32, ) -> Weight; + fn on_initialize_per_queue_item(q: u32, ) -> Weight; + fn update_schedule() -> Weight; + fn instantiate_with_code(c: u32, s: u32, ) -> Weight; + fn instantiate(s: u32, ) -> Weight; + fn call() -> Weight; + fn claim_surcharge() -> Weight; + fn seal_caller(r: u32, ) -> Weight; + fn seal_address(r: u32, ) -> Weight; + fn seal_gas_left(r: u32, ) -> Weight; + fn seal_balance(r: u32, ) -> Weight; + fn seal_value_transferred(r: u32, ) -> Weight; + fn seal_minimum_balance(r: u32, ) -> Weight; + fn seal_tombstone_deposit(r: u32, ) -> Weight; + fn seal_rent_allowance(r: u32, ) -> Weight; + fn seal_block_number(r: u32, ) -> Weight; + fn seal_now(r: u32, ) -> Weight; + fn seal_weight_to_fee(r: u32, ) -> Weight; + fn seal_gas(r: u32, ) -> Weight; + fn seal_input(r: u32, ) -> Weight; + fn seal_input_per_kb(n: u32, ) -> Weight; + fn seal_return(r: u32, ) -> Weight; + fn seal_return_per_kb(n: u32, ) -> Weight; + fn seal_terminate(r: u32, ) -> Weight; + fn seal_restore_to(r: u32, ) -> Weight; + fn seal_restore_to_per_delta(d: u32, ) -> Weight; + fn seal_random(r: u32, ) -> Weight; + fn seal_deposit_event(r: u32, ) -> Weight; + fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight; + fn seal_set_rent_allowance(r: u32, ) -> Weight; + fn seal_set_storage(r: u32, ) -> Weight; + fn seal_set_storage_per_kb(n: u32, ) -> Weight; + fn seal_clear_storage(r: u32, ) -> Weight; + fn seal_get_storage(r: u32, ) -> Weight; + fn seal_get_storage_per_kb(n: u32, ) -> Weight; + fn seal_transfer(r: u32, ) -> Weight; + fn seal_call(r: u32, ) -> Weight; + fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight; + fn seal_instantiate(r: u32, ) -> Weight; + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight; + fn seal_hash_sha2_256(r: u32, ) -> Weight; + fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight; + fn seal_hash_keccak_256(r: u32, ) -> Weight; + fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight; + fn seal_hash_blake2_256(r: u32, ) -> Weight; + fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight; + fn seal_hash_blake2_128(r: u32, ) -> Weight; + fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight; + fn instr_i64const(r: u32, ) -> Weight; + fn instr_i64load(r: u32, ) -> Weight; + fn instr_i64store(r: u32, ) -> Weight; + fn instr_select(r: u32, ) -> Weight; + fn instr_if(r: u32, ) -> Weight; + fn instr_br(r: u32, ) -> Weight; + fn instr_br_if(r: u32, ) -> Weight; + fn instr_br_table(r: u32, ) -> Weight; + fn instr_br_table_per_entry(e: u32, ) -> Weight; + fn instr_call(r: u32, ) -> Weight; + fn instr_call_indirect(r: u32, ) -> Weight; + fn instr_call_indirect_per_param(p: u32, ) -> Weight; + fn instr_local_get(r: u32, ) -> Weight; + fn instr_local_set(r: u32, ) -> Weight; + fn instr_local_tee(r: u32, ) -> Weight; + fn instr_global_get(r: u32, ) -> Weight; + fn instr_global_set(r: u32, ) -> Weight; + fn instr_memory_current(r: u32, ) -> Weight; + fn instr_memory_grow(r: u32, ) -> Weight; + fn instr_i64clz(r: u32, ) -> Weight; + fn instr_i64ctz(r: u32, ) -> Weight; + fn instr_i64popcnt(r: u32, ) -> Weight; + fn instr_i64eqz(r: u32, ) -> Weight; + fn instr_i64extendsi32(r: u32, ) -> Weight; + fn instr_i64extendui32(r: u32, ) -> Weight; + fn instr_i32wrapi64(r: u32, ) -> Weight; + fn instr_i64eq(r: u32, ) -> Weight; + fn instr_i64ne(r: u32, ) -> Weight; + fn instr_i64lts(r: u32, ) -> Weight; + fn instr_i64ltu(r: u32, ) -> Weight; + fn instr_i64gts(r: u32, ) -> 
Weight; + fn instr_i64gtu(r: u32, ) -> Weight; + fn instr_i64les(r: u32, ) -> Weight; + fn instr_i64leu(r: u32, ) -> Weight; + fn instr_i64ges(r: u32, ) -> Weight; + fn instr_i64geu(r: u32, ) -> Weight; + fn instr_i64add(r: u32, ) -> Weight; + fn instr_i64sub(r: u32, ) -> Weight; + fn instr_i64mul(r: u32, ) -> Weight; + fn instr_i64divs(r: u32, ) -> Weight; + fn instr_i64divu(r: u32, ) -> Weight; + fn instr_i64rems(r: u32, ) -> Weight; + fn instr_i64remu(r: u32, ) -> Weight; + fn instr_i64and(r: u32, ) -> Weight; + fn instr_i64or(r: u32, ) -> Weight; + fn instr_i64xor(r: u32, ) -> Weight; + fn instr_i64shl(r: u32, ) -> Weight; + fn instr_i64shrs(r: u32, ) -> Weight; + fn instr_i64shru(r: u32, ) -> Weight; + fn instr_i64rotl(r: u32, ) -> Weight; + fn instr_i64rotr(r: u32, ) -> Weight; +} + +/// Weights for pallet_contracts using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn on_initialize() -> Weight { + (3_947_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + } + fn on_initialize_per_trie_key(k: u32, ) -> Weight { + (46_644_000 as Weight) + // Standard Error: 5_000 + .saturating_add((2_295_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) + } + fn on_initialize_per_queue_item(q: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 164_000 + .saturating_add((165_220_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn update_schedule() -> Weight { + (28_195_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn instantiate_with_code(c: u32, s: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 126_000 + .saturating_add((154_196_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 63_000 + .saturating_add((2_764_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + fn instantiate(s: u32, ) -> Weight { + (201_407_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_247_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn call() -> Weight { + (180_337_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn claim_surcharge() -> Weight { + (322_371_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn seal_caller(r: u32, ) -> Weight { + (135_499_000 as Weight) + // Standard Error: 296_000 + .saturating_add((275_938_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_address(r: u32, ) -> Weight { + (132_674_000 as Weight) + // Standard Error: 158_000 + .saturating_add((273_808_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_gas_left(r: u32, ) -> Weight { + (126_819_000 as Weight) + // Standard Error: 145_000 + .saturating_add((269_173_000 as 
Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_balance(r: u32, ) -> Weight { + (140_223_000 as Weight) + // Standard Error: 259_000 + .saturating_add((581_353_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_value_transferred(r: u32, ) -> Weight { + (129_490_000 as Weight) + // Standard Error: 132_000 + .saturating_add((269_433_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_minimum_balance(r: u32, ) -> Weight { + (127_251_000 as Weight) + // Standard Error: 161_000 + .saturating_add((268_720_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_tombstone_deposit(r: u32, ) -> Weight { + (129_546_000 as Weight) + // Standard Error: 130_000 + .saturating_add((268_280_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_rent_allowance(r: u32, ) -> Weight { + (133_306_000 as Weight) + // Standard Error: 208_000 + .saturating_add((604_235_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_block_number(r: u32, ) -> Weight { + (133_689_000 as Weight) + // Standard Error: 115_000 + .saturating_add((267_107_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_now(r: u32, ) -> Weight { + (133_773_000 as Weight) + // Standard Error: 130_000 + .saturating_add((268_897_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_weight_to_fee(r: u32, ) -> Weight { + (133_222_000 as Weight) + // Standard Error: 476_000 + .saturating_add((514_400_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + } + fn seal_gas(r: u32, ) -> Weight { + (118_769_000 as Weight) + // Standard Error: 102_000 + .saturating_add((134_134_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_input(r: u32, ) -> Weight { + (124_719_000 as Weight) + // Standard Error: 93_000 + .saturating_add((7_486_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_input_per_kb(n: u32, ) -> Weight { + (136_348_000 as Weight) + // Standard Error: 0 + .saturating_add((274_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_return(r: u32, ) -> Weight { + (118_710_000 as Weight) + // Standard Error: 77_000 + .saturating_add((4_566_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_return_per_kb(n: u32, ) -> Weight { + (127_609_000 as Weight) + // Standard Error: 0 + .saturating_add((786_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_terminate(r: u32, ) -> Weight { + (125_463_000 as Weight) + // Standard Error: 154_000 + .saturating_add((106_188_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) + } + fn seal_restore_to(r: u32, ) -> Weight { + (219_195_000 as Weight) + // 
Standard Error: 361_000 + .saturating_add((131_326_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes((6 as Weight).saturating_mul(r as Weight))) + } + fn seal_restore_to_per_delta(d: u32, ) -> Weight { + (6_742_000 as Weight) + // Standard Error: 2_484_000 + .saturating_add((3_747_735_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) + .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) + .saturating_add(T::DbWeight::get().writes(7 as Weight)) + .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) + } + fn seal_random(r: u32, ) -> Weight { + (137_248_000 as Weight) + // Standard Error: 662_000 + .saturating_add((661_121_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + } + fn seal_deposit_event(r: u32, ) -> Weight { + (147_654_000 as Weight) + // Standard Error: 305_000 + .saturating_add((935_148_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { + (1_246_123_000 as Weight) + // Standard Error: 2_807_000 + .saturating_add((585_535_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 553_000 + .saturating_add((249_976_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) + .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) + } + fn seal_set_rent_allowance(r: u32, ) -> Weight { + (140_588_000 as Weight) + // Standard Error: 228_000 + .saturating_add((707_872_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn seal_set_storage(r: u32, ) -> Weight { + (2_767_124_000 as Weight) + // Standard Error: 18_504_000 + .saturating_add((17_507_873_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + fn seal_set_storage_per_kb(n: u32, ) -> Weight { + (1_748_586_000 as Weight) + // Standard Error: 359_000 + .saturating_add((75_231_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn seal_clear_storage(r: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 2_209_000 + .saturating_add((2_261_355_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + fn seal_get_storage(r: u32, ) -> Weight { + (83_780_000 as Weight) + // Standard Error: 965_000 + .saturating_add((973_164_000 as Weight).saturating_mul(r as Weight)) + 
.saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + } + fn seal_get_storage_per_kb(n: u32, ) -> Weight { + (728_625_000 as Weight) + // Standard Error: 294_000 + .saturating_add((154_625_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + } + fn seal_transfer(r: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 1_543_000 + .saturating_add((5_467_966_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + fn seal_call(r: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 9_216_000 + .saturating_add((10_265_093_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().reads((200 as Weight).saturating_mul(r as Weight))) + } + fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { + (10_426_869_000 as Weight) + // Standard Error: 114_622_000 + .saturating_add((4_366_037_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 40_000 + .saturating_add((59_741_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 43_000 + .saturating_add((82_331_000 as Weight).saturating_mul(o as Weight)) + .saturating_add(T::DbWeight::get().reads(206 as Weight)) + .saturating_add(T::DbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) + } + fn seal_instantiate(r: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 35_927_000 + .saturating_add((21_088_623_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((300 as Weight).saturating_mul(r as Weight))) + } + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { + (17_200_760_000 as Weight) + // Standard Error: 157_000 + .saturating_add((61_221_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 157_000 + .saturating_add((84_149_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 157_000 + .saturating_add((284_655_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(207 as Weight)) + .saturating_add(T::DbWeight::get().writes(203 as Weight)) + } + fn seal_hash_sha2_256(r: u32, ) -> Weight { + (126_005_000 as Weight) + // Standard Error: 133_000 + .saturating_add((252_338_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { + (727_930_000 as Weight) + // Standard Error: 57_000 + .saturating_add((430_299_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_hash_keccak_256(r: u32, ) -> Weight { + (129_778_000 as Weight) + // Standard Error: 146_000 + .saturating_add((266_097_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { + (683_078_000 as Weight) + // Standard Error: 42_000 + 
.saturating_add((344_294_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_hash_blake2_256(r: u32, ) -> Weight { + (141_731_000 as Weight) + // Standard Error: 251_000 + .saturating_add((239_931_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { + (563_895_000 as Weight) + // Standard Error: 51_000 + .saturating_add((160_216_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_hash_blake2_128(r: u32, ) -> Weight { + (132_587_000 as Weight) + // Standard Error: 159_000 + .saturating_add((239_287_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { + (606_572_000 as Weight) + // Standard Error: 34_000 + .saturating_add((160_101_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + } + fn instr_i64const(r: u32, ) -> Weight { + (24_366_000 as Weight) + // Standard Error: 21_000 + .saturating_add((3_114_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64load(r: u32, ) -> Weight { + (26_779_000 as Weight) + // Standard Error: 28_000 + .saturating_add((161_654_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64store(r: u32, ) -> Weight { + (26_763_000 as Weight) + // Standard Error: 88_000 + .saturating_add((232_822_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_select(r: u32, ) -> Weight { + (24_342_000 as Weight) + // Standard Error: 36_000 + .saturating_add((12_530_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_if(r: u32, ) -> Weight { + (24_301_000 as Weight) + // Standard Error: 25_000 + .saturating_add((12_106_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br(r: u32, ) -> Weight { + (24_253_000 as Weight) + // Standard Error: 21_000 + .saturating_add((6_464_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_if(r: u32, ) -> Weight { + (24_259_000 as Weight) + // Standard Error: 20_000 + .saturating_add((14_030_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_table(r: u32, ) -> Weight { + (24_313_000 as Weight) + // Standard Error: 37_000 + .saturating_add((15_788_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_table_per_entry(e: u32, ) -> Weight { + (37_991_000 as Weight) + // Standard Error: 0 + .saturating_add((138_000 as Weight).saturating_mul(e as Weight)) + } + fn instr_call(r: u32, ) -> Weight { + (24_739_000 as Weight) + // Standard Error: 31_000 + .saturating_add((97_567_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_call_indirect(r: u32, ) -> Weight { + (32_395_000 as Weight) + // Standard Error: 432_000 + .saturating_add((198_972_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_call_indirect_per_param(p: u32, ) -> Weight { + (238_857_000 as Weight) + // Standard Error: 6_000 + .saturating_add((3_491_000 as Weight).saturating_mul(p as Weight)) + } + fn instr_local_get(r: u32, ) -> Weight { + (42_196_000 as Weight) + // Standard Error: 22_000 + .saturating_add((3_161_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_local_set(r: u32, ) -> Weight { + (42_133_000 as Weight) + // Standard Error: 29_000 + .saturating_add((3_459_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_local_tee(r: u32, ) -> Weight { + (42_164_000 as Weight) + // Standard Error: 25_000 + 
.saturating_add((4_653_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_global_get(r: u32, ) -> Weight { + (27_802_000 as Weight) + // Standard Error: 28_000 + .saturating_add((7_780_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_global_set(r: u32, ) -> Weight { + (27_826_000 as Weight) + // Standard Error: 21_000 + .saturating_add((11_978_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_memory_current(r: u32, ) -> Weight { + (26_753_000 as Weight) + // Standard Error: 20_000 + .saturating_add((3_494_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_memory_grow(r: u32, ) -> Weight { + (25_078_000 as Weight) + // Standard Error: 4_213_000 + .saturating_add((2_324_209_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64clz(r: u32, ) -> Weight { + (24_301_000 as Weight) + // Standard Error: 28_000 + .saturating_add((5_201_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ctz(r: u32, ) -> Weight { + (24_237_000 as Weight) + // Standard Error: 14_000 + .saturating_add((5_251_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64popcnt(r: u32, ) -> Weight { + (24_290_000 as Weight) + // Standard Error: 20_000 + .saturating_add((5_780_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64eqz(r: u32, ) -> Weight { + (24_278_000 as Weight) + // Standard Error: 17_000 + .saturating_add((5_145_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64extendsi32(r: u32, ) -> Weight { + (24_249_000 as Weight) + // Standard Error: 14_000 + .saturating_add((5_248_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64extendui32(r: u32, ) -> Weight { + (24_266_000 as Weight) + // Standard Error: 13_000 + .saturating_add((5_236_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i32wrapi64(r: u32, ) -> Weight { + (24_236_000 as Weight) + // Standard Error: 12_000 + .saturating_add((5_304_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64eq(r: u32, ) -> Weight { + (24_262_000 as Weight) + // Standard Error: 22_000 + .saturating_add((7_220_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ne(r: u32, ) -> Weight { + (24_287_000 as Weight) + // Standard Error: 25_000 + .saturating_add((7_072_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64lts(r: u32, ) -> Weight { + (24_211_000 as Weight) + // Standard Error: 12_000 + .saturating_add((7_196_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ltu(r: u32, ) -> Weight { + (24_175_000 as Weight) + // Standard Error: 17_000 + .saturating_add((7_392_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64gts(r: u32, ) -> Weight { + (24_209_000 as Weight) + // Standard Error: 11_000 + .saturating_add((7_131_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64gtu(r: u32, ) -> Weight { + (24_261_000 as Weight) + // Standard Error: 19_000 + .saturating_add((7_203_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64les(r: u32, ) -> Weight { + (24_258_000 as Weight) + // Standard Error: 25_000 + .saturating_add((7_120_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64leu(r: u32, ) -> Weight { + (24_236_000 as Weight) + // Standard Error: 11_000 + .saturating_add((7_076_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ges(r: u32, ) -> Weight { + (24_262_000 as Weight) + // Standard Error: 20_000 + .saturating_add((7_261_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64geu(r: u32, ) -> Weight { + (24_242_000 as Weight) + // Standard Error: 23_000 + 
.saturating_add((7_249_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64add(r: u32, ) -> Weight { + (24_248_000 as Weight) + // Standard Error: 28_000 + .saturating_add((7_149_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64sub(r: u32, ) -> Weight { + (24_243_000 as Weight) + // Standard Error: 14_000 + .saturating_add((7_128_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64mul(r: u32, ) -> Weight { + (24_217_000 as Weight) + // Standard Error: 17_000 + .saturating_add((7_237_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64divs(r: u32, ) -> Weight { + (24_191_000 as Weight) + // Standard Error: 28_000 + .saturating_add((12_970_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64divu(r: u32, ) -> Weight { + (24_213_000 as Weight) + // Standard Error: 19_000 + .saturating_add((12_106_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rems(r: u32, ) -> Weight { + (24_238_000 as Weight) + // Standard Error: 15_000 + .saturating_add((12_944_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64remu(r: u32, ) -> Weight { + (24_317_000 as Weight) + // Standard Error: 16_000 + .saturating_add((12_129_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64and(r: u32, ) -> Weight { + (24_282_000 as Weight) + // Standard Error: 14_000 + .saturating_add((7_123_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64or(r: u32, ) -> Weight { + (24_243_000 as Weight) + // Standard Error: 18_000 + .saturating_add((7_148_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64xor(r: u32, ) -> Weight { + (24_239_000 as Weight) + // Standard Error: 18_000 + .saturating_add((7_157_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shl(r: u32, ) -> Weight { + (24_279_000 as Weight) + // Standard Error: 16_000 + .saturating_add((7_253_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shrs(r: u32, ) -> Weight { + (24_285_000 as Weight) + // Standard Error: 29_000 + .saturating_add((7_333_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shru(r: u32, ) -> Weight { + (24_298_000 as Weight) + // Standard Error: 17_000 + .saturating_add((7_228_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rotl(r: u32, ) -> Weight { + (24_226_000 as Weight) + // Standard Error: 16_000 + .saturating_add((7_269_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rotr(r: u32, ) -> Weight { + (24_235_000 as Weight) + // Standard Error: 27_000 + .saturating_add((7_299_000 as Weight).saturating_mul(r as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn on_initialize() -> Weight { + (3_947_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + } + fn on_initialize_per_trie_key(k: u32, ) -> Weight { + (46_644_000 as Weight) + // Standard Error: 5_000 + .saturating_add((2_295_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) + } + fn on_initialize_per_queue_item(q: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 164_000 + .saturating_add((165_220_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn update_schedule() -> Weight { + (28_195_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 
as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn instantiate_with_code(c: u32, s: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 126_000 + .saturating_add((154_196_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 63_000 + .saturating_add((2_764_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + fn instantiate(s: u32, ) -> Weight { + (201_407_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_247_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn call() -> Weight { + (180_337_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn claim_surcharge() -> Weight { + (322_371_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn seal_caller(r: u32, ) -> Weight { + (135_499_000 as Weight) + // Standard Error: 296_000 + .saturating_add((275_938_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_address(r: u32, ) -> Weight { + (132_674_000 as Weight) + // Standard Error: 158_000 + .saturating_add((273_808_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_gas_left(r: u32, ) -> Weight { + (126_819_000 as Weight) + // Standard Error: 145_000 + .saturating_add((269_173_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_balance(r: u32, ) -> Weight { + (140_223_000 as Weight) + // Standard Error: 259_000 + .saturating_add((581_353_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_value_transferred(r: u32, ) -> Weight { + (129_490_000 as Weight) + // Standard Error: 132_000 + .saturating_add((269_433_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_minimum_balance(r: u32, ) -> Weight { + (127_251_000 as Weight) + // Standard Error: 161_000 + .saturating_add((268_720_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_tombstone_deposit(r: u32, ) -> Weight { + (129_546_000 as Weight) + // Standard Error: 130_000 + .saturating_add((268_280_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_rent_allowance(r: u32, ) -> Weight { + (133_306_000 as Weight) + // Standard Error: 208_000 + .saturating_add((604_235_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_block_number(r: u32, ) -> Weight { + (133_689_000 as Weight) + // Standard Error: 115_000 + .saturating_add((267_107_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_now(r: u32, ) -> Weight { + (133_773_000 as Weight) + // Standard Error: 130_000 + .saturating_add((268_897_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_weight_to_fee(r: u32, ) -> Weight { + (133_222_000 as Weight) + // Standard Error: 476_000 + 
.saturating_add((514_400_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + } + fn seal_gas(r: u32, ) -> Weight { + (118_769_000 as Weight) + // Standard Error: 102_000 + .saturating_add((134_134_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_input(r: u32, ) -> Weight { + (124_719_000 as Weight) + // Standard Error: 93_000 + .saturating_add((7_486_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_input_per_kb(n: u32, ) -> Weight { + (136_348_000 as Weight) + // Standard Error: 0 + .saturating_add((274_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_return(r: u32, ) -> Weight { + (118_710_000 as Weight) + // Standard Error: 77_000 + .saturating_add((4_566_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_return_per_kb(n: u32, ) -> Weight { + (127_609_000 as Weight) + // Standard Error: 0 + .saturating_add((786_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_terminate(r: u32, ) -> Weight { + (125_463_000 as Weight) + // Standard Error: 154_000 + .saturating_add((106_188_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) + } + fn seal_restore_to(r: u32, ) -> Weight { + (219_195_000 as Weight) + // Standard Error: 361_000 + .saturating_add((131_326_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes((6 as Weight).saturating_mul(r as Weight))) + } + fn seal_restore_to_per_delta(d: u32, ) -> Weight { + (6_742_000 as Weight) + // Standard Error: 2_484_000 + .saturating_add((3_747_735_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) + .saturating_add(RocksDbWeight::get().writes(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) + } + fn seal_random(r: u32, ) -> Weight { + (137_248_000 as Weight) + // Standard Error: 662_000 + .saturating_add((661_121_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + } + fn seal_deposit_event(r: u32, ) -> Weight { + (147_654_000 as Weight) + // Standard Error: 305_000 + .saturating_add((935_148_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { + (1_246_123_000 as Weight) + // Standard Error: 2_807_000 + .saturating_add((585_535_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 553_000 + .saturating_add((249_976_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) + .saturating_add(RocksDbWeight::get().writes((100 as 
Weight).saturating_mul(t as Weight))) + } + fn seal_set_rent_allowance(r: u32, ) -> Weight { + (140_588_000 as Weight) + // Standard Error: 228_000 + .saturating_add((707_872_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn seal_set_storage(r: u32, ) -> Weight { + (2_767_124_000 as Weight) + // Standard Error: 18_504_000 + .saturating_add((17_507_873_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + fn seal_set_storage_per_kb(n: u32, ) -> Weight { + (1_748_586_000 as Weight) + // Standard Error: 359_000 + .saturating_add((75_231_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn seal_clear_storage(r: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 2_209_000 + .saturating_add((2_261_355_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + fn seal_get_storage(r: u32, ) -> Weight { + (83_780_000 as Weight) + // Standard Error: 965_000 + .saturating_add((973_164_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + } + fn seal_get_storage_per_kb(n: u32, ) -> Weight { + (728_625_000 as Weight) + // Standard Error: 294_000 + .saturating_add((154_625_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + } + fn seal_transfer(r: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 1_543_000 + .saturating_add((5_467_966_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + fn seal_call(r: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 9_216_000 + .saturating_add((10_265_093_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().reads((200 as Weight).saturating_mul(r as Weight))) + } + fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { + (10_426_869_000 as Weight) + // Standard Error: 114_622_000 + .saturating_add((4_366_037_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 40_000 + .saturating_add((59_741_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 43_000 + .saturating_add((82_331_000 as Weight).saturating_mul(o as Weight)) + .saturating_add(RocksDbWeight::get().reads(206 as Weight)) + .saturating_add(RocksDbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) + } + fn seal_instantiate(r: u32, ) -> Weight { 
+ (0 as Weight) + // Standard Error: 35_927_000 + .saturating_add((21_088_623_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((300 as Weight).saturating_mul(r as Weight))) + } + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { + (17_200_760_000 as Weight) + // Standard Error: 157_000 + .saturating_add((61_221_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 157_000 + .saturating_add((84_149_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 157_000 + .saturating_add((284_655_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(207 as Weight)) + .saturating_add(RocksDbWeight::get().writes(203 as Weight)) + } + fn seal_hash_sha2_256(r: u32, ) -> Weight { + (126_005_000 as Weight) + // Standard Error: 133_000 + .saturating_add((252_338_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { + (727_930_000 as Weight) + // Standard Error: 57_000 + .saturating_add((430_299_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_hash_keccak_256(r: u32, ) -> Weight { + (129_778_000 as Weight) + // Standard Error: 146_000 + .saturating_add((266_097_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { + (683_078_000 as Weight) + // Standard Error: 42_000 + .saturating_add((344_294_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_hash_blake2_256(r: u32, ) -> Weight { + (141_731_000 as Weight) + // Standard Error: 251_000 + .saturating_add((239_931_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { + (563_895_000 as Weight) + // Standard Error: 51_000 + .saturating_add((160_216_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_hash_blake2_128(r: u32, ) -> Weight { + (132_587_000 as Weight) + // Standard Error: 159_000 + .saturating_add((239_287_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { + (606_572_000 as Weight) + // Standard Error: 34_000 + .saturating_add((160_101_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + } + fn instr_i64const(r: u32, ) -> Weight { + (24_366_000 as Weight) + // Standard Error: 21_000 + .saturating_add((3_114_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64load(r: u32, ) -> Weight { + (26_779_000 as Weight) + // Standard Error: 28_000 + .saturating_add((161_654_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64store(r: u32, ) -> Weight { + (26_763_000 as Weight) + // Standard Error: 88_000 + .saturating_add((232_822_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_select(r: u32, ) -> Weight { + (24_342_000 as Weight) + // Standard Error: 36_000 + .saturating_add((12_530_000 as Weight).saturating_mul(r as Weight)) + 
} + fn instr_if(r: u32, ) -> Weight { + (24_301_000 as Weight) + // Standard Error: 25_000 + .saturating_add((12_106_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br(r: u32, ) -> Weight { + (24_253_000 as Weight) + // Standard Error: 21_000 + .saturating_add((6_464_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_if(r: u32, ) -> Weight { + (24_259_000 as Weight) + // Standard Error: 20_000 + .saturating_add((14_030_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_table(r: u32, ) -> Weight { + (24_313_000 as Weight) + // Standard Error: 37_000 + .saturating_add((15_788_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_table_per_entry(e: u32, ) -> Weight { + (37_991_000 as Weight) + // Standard Error: 0 + .saturating_add((138_000 as Weight).saturating_mul(e as Weight)) + } + fn instr_call(r: u32, ) -> Weight { + (24_739_000 as Weight) + // Standard Error: 31_000 + .saturating_add((97_567_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_call_indirect(r: u32, ) -> Weight { + (32_395_000 as Weight) + // Standard Error: 432_000 + .saturating_add((198_972_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_call_indirect_per_param(p: u32, ) -> Weight { + (238_857_000 as Weight) + // Standard Error: 6_000 + .saturating_add((3_491_000 as Weight).saturating_mul(p as Weight)) + } + fn instr_local_get(r: u32, ) -> Weight { + (42_196_000 as Weight) + // Standard Error: 22_000 + .saturating_add((3_161_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_local_set(r: u32, ) -> Weight { + (42_133_000 as Weight) + // Standard Error: 29_000 + .saturating_add((3_459_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_local_tee(r: u32, ) -> Weight { + (42_164_000 as Weight) + // Standard Error: 25_000 + .saturating_add((4_653_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_global_get(r: u32, ) -> Weight { + (27_802_000 as Weight) + // Standard Error: 28_000 + .saturating_add((7_780_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_global_set(r: u32, ) -> Weight { + (27_826_000 as Weight) + // Standard Error: 21_000 + .saturating_add((11_978_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_memory_current(r: u32, ) -> Weight { + (26_753_000 as Weight) + // Standard Error: 20_000 + .saturating_add((3_494_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_memory_grow(r: u32, ) -> Weight { + (25_078_000 as Weight) + // Standard Error: 4_213_000 + .saturating_add((2_324_209_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64clz(r: u32, ) -> Weight { + (24_301_000 as Weight) + // Standard Error: 28_000 + .saturating_add((5_201_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ctz(r: u32, ) -> Weight { + (24_237_000 as Weight) + // Standard Error: 14_000 + .saturating_add((5_251_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64popcnt(r: u32, ) -> Weight { + (24_290_000 as Weight) + // Standard Error: 20_000 + .saturating_add((5_780_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64eqz(r: u32, ) -> Weight { + (24_278_000 as Weight) + // Standard Error: 17_000 + .saturating_add((5_145_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64extendsi32(r: u32, ) -> Weight { + (24_249_000 as Weight) + // Standard Error: 14_000 + .saturating_add((5_248_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64extendui32(r: u32, ) -> Weight { + (24_266_000 as Weight) + // Standard Error: 13_000 + .saturating_add((5_236_000 as Weight).saturating_mul(r 
as Weight)) + } + fn instr_i32wrapi64(r: u32, ) -> Weight { + (24_236_000 as Weight) + // Standard Error: 12_000 + .saturating_add((5_304_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64eq(r: u32, ) -> Weight { + (24_262_000 as Weight) + // Standard Error: 22_000 + .saturating_add((7_220_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ne(r: u32, ) -> Weight { + (24_287_000 as Weight) + // Standard Error: 25_000 + .saturating_add((7_072_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64lts(r: u32, ) -> Weight { + (24_211_000 as Weight) + // Standard Error: 12_000 + .saturating_add((7_196_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ltu(r: u32, ) -> Weight { + (24_175_000 as Weight) + // Standard Error: 17_000 + .saturating_add((7_392_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64gts(r: u32, ) -> Weight { + (24_209_000 as Weight) + // Standard Error: 11_000 + .saturating_add((7_131_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64gtu(r: u32, ) -> Weight { + (24_261_000 as Weight) + // Standard Error: 19_000 + .saturating_add((7_203_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64les(r: u32, ) -> Weight { + (24_258_000 as Weight) + // Standard Error: 25_000 + .saturating_add((7_120_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64leu(r: u32, ) -> Weight { + (24_236_000 as Weight) + // Standard Error: 11_000 + .saturating_add((7_076_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ges(r: u32, ) -> Weight { + (24_262_000 as Weight) + // Standard Error: 20_000 + .saturating_add((7_261_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64geu(r: u32, ) -> Weight { + (24_242_000 as Weight) + // Standard Error: 23_000 + .saturating_add((7_249_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64add(r: u32, ) -> Weight { + (24_248_000 as Weight) + // Standard Error: 28_000 + .saturating_add((7_149_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64sub(r: u32, ) -> Weight { + (24_243_000 as Weight) + // Standard Error: 14_000 + .saturating_add((7_128_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64mul(r: u32, ) -> Weight { + (24_217_000 as Weight) + // Standard Error: 17_000 + .saturating_add((7_237_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64divs(r: u32, ) -> Weight { + (24_191_000 as Weight) + // Standard Error: 28_000 + .saturating_add((12_970_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64divu(r: u32, ) -> Weight { + (24_213_000 as Weight) + // Standard Error: 19_000 + .saturating_add((12_106_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rems(r: u32, ) -> Weight { + (24_238_000 as Weight) + // Standard Error: 15_000 + .saturating_add((12_944_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64remu(r: u32, ) -> Weight { + (24_317_000 as Weight) + // Standard Error: 16_000 + .saturating_add((12_129_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64and(r: u32, ) -> Weight { + (24_282_000 as Weight) + // Standard Error: 14_000 + .saturating_add((7_123_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64or(r: u32, ) -> Weight { + (24_243_000 as Weight) + // Standard Error: 18_000 + .saturating_add((7_148_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64xor(r: u32, ) -> Weight { + (24_239_000 as Weight) + // Standard Error: 18_000 + .saturating_add((7_157_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shl(r: u32, ) -> Weight { + 
(24_279_000 as Weight) + // Standard Error: 16_000 + .saturating_add((7_253_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shrs(r: u32, ) -> Weight { + (24_285_000 as Weight) + // Standard Error: 29_000 + .saturating_add((7_333_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shru(r: u32, ) -> Weight { + (24_298_000 as Weight) + // Standard Error: 17_000 + .saturating_add((7_228_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rotl(r: u32, ) -> Weight { + (24_226_000 as Weight) + // Standard Error: 16_000 + .saturating_add((7_269_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rotr(r: u32, ) -> Weight { + (24_235_000 as Weight) + // Standard Error: 27_000 + .saturating_add((7_299_000 as Weight).saturating_mul(r as Weight)) + } +} diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index 44639a2275644..2e675dd25188e 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-democracy" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,20 +14,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-scheduler = { version = "2.0.0", path = "../scheduler" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } -substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +pallet-balances = { version = "3.0.0", path = "../balances" } +pallet-scheduler = { version = "3.0.0", path = "../scheduler" } +sp-storage = { version = "3.0.0", path = "../../primitives/storage" } +substrate-test-utils = { version = "3.0.0", path = "../../test-utils" } hex-literal = "0.3.1" [features] diff --git a/frame/democracy/README.md b/frame/democracy/README.md index ffbf2f36a1760..6a390cc048e1c 100644 --- a/frame/democracy/README.md +++ b/frame/democracy/README.md @@ -132,4 +132,4 @@ This call can 
only be made by the `VetoOrigin`. - `cancel_queued` - Cancels a proposal that is queued for enactment. - `clear_public_proposal` - Removes all public proposals. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index b5de1a91c17ad..c66ce20dab87c 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,21 +34,21 @@ const MAX_REFERENDUMS: u32 = 99; const MAX_SECONDERS: u32 = 100; const MAX_BYTES: u32 = 16_384; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::Event) { let events = System::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } -fn funded_account(name: &'static str, index: u32) -> T::AccountId { +fn funded_account(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); caller } -fn add_proposal(n: u32) -> Result { +fn add_proposal(n: u32) -> Result { let other = funded_account::("proposer", n); let value = T::MinimumDeposit::get(); let proposal_hash: T::Hash = T::Hashing::hash_of(&n); @@ -62,7 +62,7 @@ fn add_proposal(n: u32) -> Result { Ok(proposal_hash) } -fn add_referendum(n: u32) -> Result { +fn add_referendum(n: u32) -> Result { let proposal_hash: T::Hash = T::Hashing::hash_of(&n); let vote_threshold = VoteThreshold::SimpleMajority; @@ -84,7 +84,7 @@ fn add_referendum(n: u32) -> Result { Ok(referendum_index) } -fn account_vote(b: BalanceOf) -> AccountVote> { +fn account_vote(b: BalanceOf) -> AccountVote> { let v = Vote { aye: true, conviction: Conviction::Locked1x, @@ -97,8 +97,6 @@ fn account_vote(b: BalanceOf) -> AccountVote> { } benchmarks! { - _ { } - propose { let p = T::MaxProposals::get(); diff --git a/frame/democracy/src/conviction.rs b/frame/democracy/src/conviction.rs index bb563e4b74830..c2dff741a9c23 100644 --- a/frame/democracy/src/conviction.rs +++ b/frame/democracy/src/conviction.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index fa8d07fd78db7..a7dd2d5bd9297 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ //! # Democracy Pallet //! -//! - [`democracy::Trait`](./trait.Trait.html) +//! - [`democracy::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -199,13 +199,13 @@ pub type PropIndex = u32; /// A referendum index. 
pub type ReferendumIndex = u32; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; -pub trait Trait: frame_system::Trait + Sized { +pub trait Config: frame_system::Config + Sized { type Proposal: Parameter + Dispatchable + From>; - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Currency type for this module. type Currency: ReservableCurrency @@ -338,7 +338,7 @@ enum Releases { } decl_storage! { - trait Store for Module as Democracy { + trait Store for Module as Democracy { // TODO: Refactor public proposal queue into its own pallet. // https://github.com/paritytech/substrate/issues/5322 /// The number of (public) proposals that have been made so far. @@ -413,9 +413,9 @@ decl_storage! { decl_event! { pub enum Event where Balance = BalanceOf, - ::AccountId, - ::Hash, - ::BlockNumber, + ::AccountId, + ::Hash, + ::BlockNumber, { /// A motion has been proposed by a public account. \[proposal_index, deposit\] Proposed(PropIndex, Balance), @@ -461,7 +461,7 @@ decl_event! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Value too low ValueLow, /// Proposal does not exist @@ -537,7 +537,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// The minimum period of locking and the period between a proposal being approved and enacted. @@ -1086,7 +1086,7 @@ decl_module! { } /// Enact a proposal from a referendum. For now we just make the weight be the maximum. - #[weight = T::MaximumBlockWeight::get()] + #[weight = T::BlockWeights::get().max_block] fn enact_proposal(origin, proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { ensure_root(origin)?; Self::do_enact_proposal(proposal_hash, index) @@ -1168,7 +1168,7 @@ decl_module! { } } -impl Module { +impl Module { // exposed immutables. /// Get the amount locked in support of `proposal`; `None` if proposal isn't a valid proposal @@ -1609,6 +1609,7 @@ impl Module { /// - Db reads per R: `DepositOf`, `ReferendumInfoOf` /// # fn begin_block(now: T::BlockNumber) -> Result { + let max_block_weight = T::BlockWeights::get().max_block; let mut weight = 0; // pick out another public referendum if it's time. @@ -1616,7 +1617,7 @@ impl Module { // Errors come from the queue being empty. we don't really care about that, and even if // we did, there is nothing we can do here. let _ = Self::launch_next(now); - weight = T::MaximumBlockWeight::get(); + weight = max_block_weight; } let next = Self::lowest_unbaked(); @@ -1627,7 +1628,7 @@ impl Module { for (index, info) in Self::maturing_referenda_at_inner(now, next..last).into_iter() { let approved = Self::bake_referendum(now, index, info)?; ReferendumInfoOf::::insert(index, ReferendumInfo::Finished { end: now, approved }); - weight = T::MaximumBlockWeight::get(); + weight = max_block_weight; } Ok(weight) diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index bcc7099bb34a4..99f413b389284 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ //! The crate's tests. +use crate as pallet_democracy; use super::*; -use std::cell::RefCell; use codec::Encode; use frame_support::{ - impl_outer_origin, impl_outer_dispatch, assert_noop, assert_ok, parameter_types, - impl_outer_event, ord_parameter_types, traits::{Contains, OnInitialize, Filter}, + assert_noop, assert_ok, parameter_types, ord_parameter_types, + traits::{Contains, OnInitialize, Filter}, weights::Weight, }; use sp_core::H256; @@ -51,30 +51,21 @@ const BIG_NAY: Vote = Vote { aye: false, conviction: Conviction::Locked1x }; const MAX_PROPOSALS: u32 = 100; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - frame_system::System, - pallet_balances::Balances, - democracy::Democracy, +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Scheduler: pallet_scheduler::{Module, Call, Storage, Config, Event}, + Democracy: pallet_democracy::{Module, Call, Storage, Config, Event}, } -} - -mod democracy { - pub use crate::Event; -} - -impl_outer_event! { - pub enum Event for Test { - system, - pallet_balances, - pallet_scheduler, - democracy, - } -} +); // Test that a fitlered call can be dispatched. pub struct BaseFilter; @@ -84,17 +75,16 @@ impl Filter for BaseFilter { } } -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1_000_000; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1_000_000); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -106,24 +96,18 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! 
{ - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; } -impl pallet_scheduler::Trait for Test { +impl pallet_scheduler::Config for Test { type Event = Event; type Origin = Origin; type PalletsOrigin = OriginCaller; @@ -136,7 +120,7 @@ impl pallet_scheduler::Trait for Test { parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type Event = Event; @@ -154,6 +138,8 @@ parameter_types! { pub const CooloffPeriod: u64 = 2; pub const MaxVotes: u32 = 100; pub const MaxProposals: u32 = MAX_PROPOSALS; + pub static PreimageByteDeposit: u64 = 0; + pub static InstantAllowed: bool = false; } ord_parameter_types! { pub const One: u64 = 1; @@ -171,19 +157,8 @@ impl Contains for OneToFive { #[cfg(feature = "runtime-benchmarks")] fn add(_m: &u64) {} } -thread_local! { - static PREIMAGE_BYTE_DEPOSIT: RefCell = RefCell::new(0); - static INSTANT_ALLOWED: RefCell = RefCell::new(false); -} -pub struct PreimageByteDeposit; -impl Get for PreimageByteDeposit { - fn get() -> u64 { PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow()) } -} -pub struct InstantAllowed; -impl Get for InstantAllowed { - fn get() -> bool { INSTANT_ALLOWED.with(|v| *v.borrow()) } -} -impl super::Trait for Test { + +impl Config for Test { type Proposal = Call; type Event = Event; type Currency = pallet_balances::Module; @@ -218,7 +193,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { pallet_balances::GenesisConfig::{ balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::default().assimilate_storage(&mut t).unwrap(); + pallet_democracy::GenesisConfig::default().assimilate_storage(&mut t).unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext @@ -230,11 +205,6 @@ pub fn new_test_ext_execute_with_cond(execute: impl FnOnce(bool) -> () + Clone) new_test_ext().execute_with(|| execute(true)); } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Scheduler = pallet_scheduler::Module; -type Democracy = Module; - #[test] fn params_should_work() { new_test_ext().execute_with(|| { @@ -252,7 +222,7 @@ fn set_balance_proposal(value: u64) -> Vec { fn set_balance_proposal_is_correctly_filtered_out() { for i in 0..10 { let call = Call::decode(&mut &set_balance_proposal(i)[..]).unwrap(); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); } } diff --git a/frame/democracy/src/tests/cancellation.rs b/frame/democracy/src/tests/cancellation.rs index 4221865a3e5b0..d48173a39d832 100644 --- a/frame/democracy/src/tests/cancellation.rs +++ b/frame/democracy/src/tests/cancellation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/tests/decoders.rs b/frame/democracy/src/tests/decoders.rs index 6b8e661ca9fd9..52b61d8d9e7d0 100644 --- a/frame/democracy/src/tests/decoders.rs +++ b/frame/democracy/src/tests/decoders.rs @@ -1,18 +1,19 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
-// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! The for various partial storage decoders diff --git a/frame/democracy/src/tests/delegation.rs b/frame/democracy/src/tests/delegation.rs index 34dec6d0b49a6..d3afa1c13f90b 100644 --- a/frame/democracy/src/tests/delegation.rs +++ b/frame/democracy/src/tests/delegation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/tests/external_proposing.rs b/frame/democracy/src/tests/external_proposing.rs index 3f9be2137906b..ff1a7a87da852 100644 --- a/frame/democracy/src/tests/external_proposing.rs +++ b/frame/democracy/src/tests/external_proposing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/tests/fast_tracking.rs b/frame/democracy/src/tests/fast_tracking.rs index 8df34001cde04..d01dafaa762ba 100644 --- a/frame/democracy/src/tests/fast_tracking.rs +++ b/frame/democracy/src/tests/fast_tracking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/tests/lock_voting.rs b/frame/democracy/src/tests/lock_voting.rs index 93867030588c3..29cd24e1de60a 100644 --- a/frame/democracy/src/tests/lock_voting.rs +++ b/frame/democracy/src/tests/lock_voting.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/tests/preimage.rs b/frame/democracy/src/tests/preimage.rs index 8a2cbaf534032..135b167520be5 100644 --- a/frame/democracy/src/tests/preimage.rs +++ b/frame/democracy/src/tests/preimage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs index d862aa98e7880..4785ef0a89467 100644 --- a/frame/democracy/src/tests/public_proposals.rs +++ b/frame/democracy/src/tests/public_proposals.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/tests/scheduling.rs b/frame/democracy/src/tests/scheduling.rs index 5bcfbae994689..e178ff0fc1a25 100644 --- a/frame/democracy/src/tests/scheduling.rs +++ b/frame/democracy/src/tests/scheduling.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/tests/voting.rs b/frame/democracy/src/tests/voting.rs index 9ae57797d15dd..207085ceb570f 100644 --- a/frame/democracy/src/tests/voting.rs +++ b/frame/democracy/src/tests/voting.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/types.rs b/frame/democracy/src/types.rs index 8ee0838f8a36d..22341ba31ee03 100644 --- a/frame/democracy/src/types.rs +++ b/frame/democracy/src/types.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs index 09ff0d71e48ca..5adc76f4ae00b 100644 --- a/frame/democracy/src/vote.rs +++ b/frame/democracy/src/vote.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,7 +30,7 @@ pub struct Vote { } impl Encode for Vote { - fn encode_to(&self, output: &mut T) { + fn encode_to(&self, output: &mut T) { output.push_byte(u8::from(self.conviction) | if self.aye { 0b1000_0000 } else { 0 }); } } diff --git a/frame/democracy/src/vote_threshold.rs b/frame/democracy/src/vote_threshold.rs index 2268a55936c50..3114b22499d0e 100644 --- a/frame/democracy/src/vote_threshold.rs +++ b/frame/democracy/src/vote_threshold.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
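The single-byte `Vote` encoding touched in the `vote.rs` hunk above packs the conviction into the low seven bits and the aye flag into the top bit. A minimal standalone sketch of that bit layout follows; `encode_vote`/`decode_vote` are hypothetical helpers (not pallet code), and `Conviction::Locked1x` is assumed to convert to `1u8`.

```rust
// Sketch only: standalone illustration of the bit layout used by `Vote::encode_to`.
// These helpers are hypothetical and not part of pallet_democracy.
fn encode_vote(aye: bool, conviction: u8) -> u8 {
    // Conviction occupies the low seven bits; the aye flag is the top bit.
    conviction | if aye { 0b1000_0000 } else { 0 }
}

fn decode_vote(byte: u8) -> (bool, u8) {
    (byte & 0b1000_0000 != 0, byte & 0b0111_1111)
}

fn main() {
    // An aye vote with `Conviction::Locked1x` (assumed to be 1u8) encodes as 0x81.
    assert_eq!(encode_vote(true, 1), 0x81);
    assert_eq!(decode_vote(0x81), (true, 1));
    // A nay vote with no conviction encodes as 0x00.
    assert_eq!(encode_vote(false, 0), 0x00);
}
```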
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index e386e5fb55313..7c169cc813ead 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -72,7 +72,7 @@ pub trait WeightInfo { /// Weights for pallet_democracy using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn propose() -> Weight { (87_883_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) diff --git a/frame/elections-phragmen/CHANGELOG.md b/frame/elections-phragmen/CHANGELOG.md new file mode 100644 index 0000000000000..3d48448fa55ec --- /dev/null +++ b/frame/elections-phragmen/CHANGELOG.md @@ -0,0 +1,24 @@ +# Changelog +All notable changes to this crate will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this crate adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [3.0.0] - UNRELEASED + +### Added +[Add slashing events to elections-phragmen](https://github.com/paritytech/substrate/pull/7543) + +### Changed + +### Fixed +[Don't slash all outgoing members](https://github.com/paritytech/substrate/pull/7394) +[Fix wrong outgoing calculation in election](https://github.com/paritytech/substrate/pull/7384) + +### Security +\[**Needs Migration**\] [Fix elections-phragmen and proxy issue + Record deposits on-chain](https://github.com/paritytech/substrate/pull/7040) + +## [2.0.0] - 2020-09-2020 + +Initial version from which version tracking has begun. 
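For context on the generated `weights.rs` functions shown above (e.g. `propose`): each combines a constant benchmark-derived execution time with per-database-access costs priced by `T::DbWeight`. A rough sketch of that composition is below; the per-read/per-write constants and the write count are placeholders, not the pallet's configured values.

```rust
// Rough sketch of how a benchmarked weight is composed: a constant execution
// component plus DbWeight-priced reads and writes. All constants are placeholders.
type Weight = u64;

const DB_READ: Weight = 25_000_000; // placeholder per-read cost
const DB_WRITE: Weight = 100_000_000; // placeholder per-write cost

fn propose_like_weight(base: Weight, reads: Weight, writes: Weight) -> Weight {
    base.saturating_add(DB_READ.saturating_mul(reads))
        .saturating_add(DB_WRITE.saturating_mul(writes))
}

fn main() {
    // e.g. the 87_883_000 base shown above plus three storage reads (write count assumed).
    println!("total weight: {}", propose_like_weight(87_883_000, 3, 3));
}
```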
+ diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 8d59cde19255a..bdb301c73ec36 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-elections-phragmen" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,21 +13,21 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-npos-elections = { version = "2.0.0", default-features = false, path = "../../primitives/npos-elections" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-npos-elections = { version = "3.0.0", default-features = false, path = "../../primitives/npos-elections" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } hex-literal = "0.3.1" -pallet-balances = { version = "2.0.0", path = "../balances" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } +pallet-balances = { version = "3.0.0", path = "../balances" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +substrate-test-utils = { version = "3.0.0", path = "../../test-utils" } [features] default = ["std"] diff --git a/frame/elections-phragmen/README.md b/frame/elections-phragmen/README.md index 5507d53970632..8c5940ea2d78e 100644 --- a/frame/elections-phragmen/README.md +++ b/frame/elections-phragmen/README.md @@ -64,4 +64,4 @@ being re-elected at the end of each round. - [`Call`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/enum.Call.html) - [`Module`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/struct.Module.html) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index e7c3719480b70..511d2751a5d77 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -1,18 +1,19 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
-// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Elections-Phragmen pallet benchmarking. @@ -30,7 +31,7 @@ const BALANCE_FACTOR: u32 = 250; const MAX_VOTERS: u32 = 500; const MAX_CANDIDATES: u32 = 200; -type Lookup = <::Lookup as StaticLookup>::Source; +type Lookup = <::Lookup as StaticLookup>::Source; macro_rules! whitelist { ($acc:ident) => { @@ -41,7 +42,7 @@ macro_rules! whitelist { } /// grab new account with infinite balance. -fn endowed_account(name: &'static str, index: u32) -> T::AccountId { +fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let account: T::AccountId = account(name, index, 0); let amount = default_stake::(BALANCE_FACTOR); let _ = T::Currency::make_free_balance_be(&account, amount); @@ -53,37 +54,23 @@ fn endowed_account(name: &'static str, index: u32) -> T::AccountId { } /// Account to lookup type of system trait. -fn as_lookup(account: T::AccountId) -> Lookup { +fn as_lookup(account: T::AccountId) -> Lookup { T::Lookup::unlookup(account) } /// Get a reasonable amount of stake based on the execution trait's configuration -fn default_stake(factor: u32) -> BalanceOf { +fn default_stake(factor: u32) -> BalanceOf { let factor = BalanceOf::::from(factor); T::Currency::minimum_balance() * factor } /// Get the current number of candidates. -fn candidate_count() -> u32 { +fn candidate_count() -> u32 { >::decode_len().unwrap_or(0usize) as u32 } -/// Get the number of votes of a voter. -fn vote_count_of(who: &T::AccountId) -> u32 { - >::get(who).1.len() as u32 -} - -/// A `DefunctVoter` struct with correct value -fn defunct_for(who: T::AccountId) -> DefunctVoter> { - DefunctVoter { - who: as_lookup::(who.clone()), - candidate_count: candidate_count::(), - vote_count: vote_count_of::(&who), - } -} - /// Add `c` new candidates. -fn submit_candidates(c: u32, prefix: &'static str) +fn submit_candidates(c: u32, prefix: &'static str) -> Result, &'static str> { (0..c).map(|i| { @@ -97,28 +84,28 @@ fn submit_candidates(c: u32, prefix: &'static str) } /// Add `c` new candidates with self vote. 
-fn submit_candidates_with_self_vote(c: u32, prefix: &'static str) +fn submit_candidates_with_self_vote(c: u32, prefix: &'static str) -> Result, &'static str> { let candidates = submit_candidates::(c, prefix)?; let stake = default_stake::(BALANCE_FACTOR); let _ = candidates.iter().map(|c| - submit_voter::(c.clone(), vec![c.clone()], stake) + submit_voter::(c.clone(), vec![c.clone()], stake).map(|_| ()) ).collect::>()?; Ok(candidates) } /// Submit one voter. -fn submit_voter(caller: T::AccountId, votes: Vec, stake: BalanceOf) - -> Result<(), sp_runtime::DispatchError> +fn submit_voter(caller: T::AccountId, votes: Vec, stake: BalanceOf) + -> frame_support::dispatch::DispatchResult { >::vote(RawOrigin::Signed(caller).into(), votes, stake) } /// create `num_voter` voters who randomly vote for at most `votes` of `all_candidates` if /// available. -fn distribute_voters(mut all_candidates: Vec, num_voters: u32, votes: usize) +fn distribute_voters(mut all_candidates: Vec, num_voters: u32, votes: usize) -> Result<(), &'static str> { let stake = default_stake::(BALANCE_FACTOR); @@ -138,7 +125,7 @@ fn distribute_voters(mut all_candidates: Vec, num_voters /// Fill the seats of members and runners-up up until `m`. Note that this might include either only /// members, or members and runners-up. -fn fill_seats_up_to(m: u32) -> Result, &'static str> { +fn fill_seats_up_to(m: u32) -> Result, &'static str> { let _ = submit_candidates_with_self_vote::(m, "fill_seats_up_to")?; assert_eq!(>::candidates().len() as u32, m, "wrong number of candidates."); >::do_phragmen(); @@ -151,25 +138,23 @@ fn fill_seats_up_to(m: u32) -> Result, &'static str> Ok( >::members() .into_iter() - .map(|(x, _)| x) - .chain(>::runners_up().into_iter().map(|(x, _)| x)) + .map(|m| m.who) + .chain(>::runners_up().into_iter().map(|r| r.who)) .collect() ) } /// removes all the storage items to reverse any genesis state. -fn clean() { +fn clean() { >::kill(); >::kill(); >::kill(); - let _ = >::drain(); + >::remove_all(); } benchmarks! { - _ {} - // -- Signed ones - vote { + vote_equal { let v in 1 .. (MAXIMUM_VOTE as u32); clean::(); @@ -179,14 +164,39 @@ benchmarks! { let caller = endowed_account::("caller", 0); let stake = default_stake::(BALANCE_FACTOR); - // vote for all of them. - let votes = all_candidates; + // original votes. + let mut votes = all_candidates; + submit_voter::(caller.clone(), votes.clone(), stake)?; + + // new votes. + votes.rotate_left(1); whitelist!(caller); - }: _(RawOrigin::Signed(caller), votes, stake) + }: vote(RawOrigin::Signed(caller), votes, stake) - vote_update { - let v in 1 .. (MAXIMUM_VOTE as u32); + vote_more { + let v in 2 .. (MAXIMUM_VOTE as u32); + clean::(); + + // create a bunch of candidates. + let all_candidates = submit_candidates::(v, "candidates")?; + + let caller = endowed_account::("caller", 0); + let stake = default_stake::(BALANCE_FACTOR); + + // original votes. + let mut votes = all_candidates.iter().skip(1).cloned().collect::>(); + submit_voter::(caller.clone(), votes.clone(), stake / >::from(10u32))?; + + // new votes. + votes = all_candidates; + assert!(votes.len() > >::get(caller.clone()).votes.len()); + + whitelist!(caller); + }: vote(RawOrigin::Signed(caller), votes, stake / >::from(10u32)) + + vote_less { + let v in 2 .. (MAXIMUM_VOTE as u32); clean::(); // create a bunch of candidates. @@ -200,7 +210,8 @@ benchmarks! { submit_voter::(caller.clone(), votes.clone(), stake)?; // new votes. 
- votes.rotate_left(1); + votes = votes.into_iter().skip(1).collect::>(); + assert!(votes.len() < >::get(caller.clone()).votes.len()); whitelist!(caller); }: vote(RawOrigin::Signed(caller), votes, stake) @@ -221,123 +232,6 @@ benchmarks! { whitelist!(caller); }: _(RawOrigin::Signed(caller)) - report_defunct_voter_correct { - // number of already existing candidates that may or may not be voted by the reported - // account. - let c in 1 .. MAX_CANDIDATES; - // number of candidates that the reported voter voted for. The worse case of search here is - // basically `c * v`. - let v in 1 .. (MAXIMUM_VOTE as u32); - // we fix the number of members to the number of desired members and runners-up. We'll be in - // this state almost always. - let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); - - clean::(); - let stake = default_stake::(BALANCE_FACTOR); - - // create m members and runners combined. - let _ = fill_seats_up_to::(m)?; - - // create a bunch of candidates as well. - let bailing_candidates = submit_candidates::(v, "bailing_candidates")?; - let all_candidates = submit_candidates::(c, "all_candidates")?; - - // account 1 is the reporter and must be whitelisted, and a voter. - let account_1 = endowed_account::("caller", 0); - submit_voter::( - account_1.clone(), - all_candidates.iter().take(1).cloned().collect(), - stake, - )?; - - // account 2 votes for all of the mentioned candidates. - let account_2 = endowed_account::("caller_2", 1); - submit_voter::( - account_2.clone(), - bailing_candidates.clone(), - stake, - )?; - - // all the bailers go away. NOTE: we can simplify this. There's no need to create all these - // candidates and remove them. The defunct voter can just vote for random accounts as long - // as there are enough members (potential candidates). - bailing_candidates.into_iter().for_each(|b| { - let count = candidate_count::(); - assert!(>::renounce_candidacy( - RawOrigin::Signed(b).into(), - Renouncing::Candidate(count), - ).is_ok()); - }); - - let defunct_info = defunct_for::(account_2.clone()); - whitelist!(account_1); - - assert!(>::is_voter(&account_2)); - }: report_defunct_voter(RawOrigin::Signed(account_1.clone()), defunct_info) - verify { - assert!(!>::is_voter(&account_2)); - #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } - } - - report_defunct_voter_incorrect { - // number of already existing candidates that may or may not be voted by the reported - // account. - let c in 1 .. MAX_CANDIDATES; - // number of candidates that the reported voter voted for. The worse case of search here is - // basically `c * v`. - let v in 1 .. (MAXIMUM_VOTE as u32); - // we fix the number of members to the number of desired members and runners-up. We'll be in - // this state almost always. - let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); - - clean::(); - let stake = default_stake::(BALANCE_FACTOR); - - // create m members and runners combined. - let _ = fill_seats_up_to::(m)?; - - // create a bunch of candidates as well. - let all_candidates = submit_candidates::(c, "candidates")?; - - // account 1 is the reporter and need to be whitelisted, and a voter. - let account_1 = endowed_account::("caller", 0); - submit_voter::( - account_1.clone(), - all_candidates.iter().take(1).cloned().collect(), - stake, - )?; - - // account 2 votes for a bunch of crap, and finally a correct candidate. 
- let account_2 = endowed_account::("caller_2", 1); - let mut invalid: Vec = (0..(v-1)) - .map(|seed| account::("invalid", 0, seed).clone()) - .collect(); - invalid.push(all_candidates.last().unwrap().clone()); - submit_voter::( - account_2.clone(), - invalid, - stake, - )?; - - let defunct_info = defunct_for::(account_2.clone()); - whitelist!(account_1); - }: report_defunct_voter(RawOrigin::Signed(account_1.clone()), defunct_info) - verify { - // account 2 is still a voter. - assert!(>::is_voter(&account_2)); - #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } - } - submit_candidacy { // number of already existing candidates. let c in 1 .. MAX_CANDIDATES; @@ -520,20 +414,52 @@ benchmarks! { } } - #[extra] - on_initialize { - // if n % TermDuration is zero, then we run phragmen. The weight function must and should - // check this as it is cheap to do so. TermDuration is not a storage item, it is a constant - // encoded in the runtime. + clean_defunct_voters { + // total number of voters. + let v in (MAX_VOTERS / 2) .. MAX_VOTERS; + // those that are defunct and need removal. + let d in 1 .. (MAX_VOTERS / 2); + + // remove any previous stuff. + clean::(); + + let all_candidates = submit_candidates::(v, "candidates")?; + distribute_voters::(all_candidates, v, MAXIMUM_VOTE)?; + + // all candidates leave. + >::kill(); + + // now everyone is defunct + assert!(>::iter().all(|(_, v)| >::is_defunct_voter(&v.votes))); + assert_eq!(>::iter().count() as u32, v); + let root = RawOrigin::Root; + }: _(root, v, d) + verify { + assert_eq!(>::iter().count() as u32, 0); + } + + election_phragmen { + // This is just to focus on phragmen in the context of this module. We always select 20 + // members, this is hard-coded in the runtime and cannot be trivially changed at this stage. + // Yet, change the number of voters, candidates and edge per voter to see the impact. Note + // that we give all candidates a self vote to make sure they are all considered. let c in 1 .. MAX_CANDIDATES; + let v in 1 .. MAX_VOTERS; + let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; clean::(); - // create c candidates. + // so we have a situation with v and e. we want e to basically always be in the range of `e + // -> e * MAXIMUM_VOTE`, but we cannot express that now with the benchmarks. So what we do + // is: when c is being iterated, v, and e are max and fine. when v is being iterated, e is + // being set to max and this is a problem. In these cases, we cap e to a lower value, namely + // v * MAXIMUM_VOTE. when e is being iterated, v is at max, and again fine. all in all, + // votes_per_voter can never be more than MAXIMUM_VOTE. Note that this might cause `v` to be + // an overestimate. + let votes_per_voter = (e / v).min(MAXIMUM_VOTE as u32); + let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; - // create 500 voters, each voting the maximum 16 - distribute_voters::(all_candidates, MAX_VOTERS, MAXIMUM_VOTE)?; + let _ = distribute_voters::(all_candidates, v, votes_per_voter as usize)?; }: { - // elect >::on_initialize(T::TermDuration::get()); } verify { @@ -552,18 +478,16 @@ benchmarks! { } #[extra] - phragmen { - // This is just to focus on phragmen in the context of this module. We always select 20 - // members, this is hard-coded in the runtime and cannot be trivially changed at this stage. - // Yet, change the number of voters, candidates and edge per voter to see the impact. 
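The `election_phragmen` benchmark introduced above derives the per-voter edge count as `(e / v).min(MAXIMUM_VOTE)`, so that when the `v` and `e` components are iterated independently a voter never receives more than `MAXIMUM_VOTE` votes. A standalone numeric illustration of that clamping (plain arithmetic, not benchmark code):

```rust
// Standalone illustration of the clamping used when deriving votes-per-voter in
// the `election_phragmen` benchmark. MAX_VOTERS = 500 and MAXIMUM_VOTE = 16 as above.
const MAXIMUM_VOTE: u32 = 16;

fn votes_per_voter(e: u32, v: u32) -> u32 {
    (e / v).min(MAXIMUM_VOTE)
}

fn main() {
    // `v` small while `e` sits at its maximum of 500 * 16: the ratio 80 is clamped to 16.
    assert_eq!(votes_per_voter(8_000, 100), 16);
    // `v` at MAX_VOTERS: exactly 16 edges per voter.
    assert_eq!(votes_per_voter(8_000, 500), 16);
    // Fewer edges simply pass through unclamped.
    assert_eq!(votes_per_voter(2_000, 500), 4);
}
```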
Note - // that we give all candidates a self vote to make sure they are all considered. + election_phragmen_c_e { let c in 1 .. MAX_CANDIDATES; - let v in 1 .. MAX_VOTERS; - let e in 1 .. (MAXIMUM_VOTE as u32); + let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; + let fixed_v = MAX_VOTERS; clean::(); + let votes_per_voter = e / fixed_v; + let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; - let _ = distribute_voters::(all_candidates, v, e as usize)?; + let _ = distribute_voters::(all_candidates, fixed_v, votes_per_voter as usize)?; }: { >::on_initialize(T::TermDuration::get()); } @@ -581,6 +505,35 @@ benchmarks! { MEMBERS.with(|m| *m.borrow_mut() = vec![]); } } + + #[extra] + election_phragmen_v { + let v in 4 .. 16; + let fixed_c = MAX_CANDIDATES; + let fixed_e = 64; + clean::(); + + let votes_per_voter = fixed_e / v; + + let all_candidates = submit_candidates_with_self_vote::(fixed_c, "candidates")?; + let _ = distribute_voters::(all_candidates, v, votes_per_voter as usize)?; + }: { + >::on_initialize(T::TermDuration::get()); + } + verify { + assert_eq!(>::members().len() as u32, T::DesiredMembers::get().min(fixed_c)); + assert_eq!( + >::runners_up().len() as u32, + T::DesiredRunnersUp::get().min(fixed_c.saturating_sub(T::DesiredMembers::get())), + ); + + #[cfg(test)] + { + // reset members in between benchmark tests. + use crate::tests::MEMBERS; + MEMBERS.with(|m| *m.borrow_mut() = vec![]); + } + } } #[cfg(test)] @@ -591,52 +544,76 @@ mod tests { #[test] fn test_benchmarks_elections_phragmen() { - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_vote::()); - }); + ExtBuilder::default() + .desired_members(13) + .desired_runners_up(7) + .build_and_execute(|| { + assert_ok!(test_benchmark_vote_equal::()); + }); + + ExtBuilder::default() + .desired_members(13) + .desired_runners_up(7) + .build_and_execute(|| { + assert_ok!(test_benchmark_vote_more::()); + }); + + ExtBuilder::default() + .desired_members(13) + .desired_runners_up(7) + .build_and_execute(|| { + assert_ok!(test_benchmark_vote_less::()); + }); + + ExtBuilder::default() + .desired_members(13) + .desired_runners_up(7) + .build_and_execute(|| { + assert_ok!(test_benchmark_remove_voter::()); + }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_remove_voter::()); + assert_ok!(test_benchmark_submit_candidacy::()); }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_report_defunct_voter_correct::()); + assert_ok!(test_benchmark_renounce_candidacy_candidate::()); }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_report_defunct_voter_incorrect::()); + assert_ok!(test_benchmark_renounce_candidacy_runners_up::()); }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_submit_candidacy::()); + assert_ok!(test_benchmark_renounce_candidacy_members::()); }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_renounce_candidacy_candidate::()); + assert_ok!(test_benchmark_remove_member_without_replacement::()); }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_renounce_candidacy_runners_up::()); + 
assert_ok!(test_benchmark_remove_member_with_replacement::()); }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_renounce_candidacy_members::()); + assert_ok!(test_benchmark_clean_defunct_voters::()); }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_remove_member_without_replacement::()); + assert_ok!(test_benchmark_election_phragmen::()); }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_remove_member_with_replacement::()); + assert_ok!(test_benchmark_election_phragmen::()); }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_on_initialize::()); + assert_ok!(test_benchmark_election_phragmen_c_e::()); }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_phragmen::()); + assert_ok!(test_benchmark_election_phragmen_v::()); }); } } diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index cf3864f2e3f92..057e9f181c7af 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,51 +23,66 @@ //! //! The election happens in _rounds_: every `N` blocks, all previous members are retired and a new //! set is elected (which may or may not have an intersection with the previous set). Each round -//! lasts for some number of blocks defined by `TermDuration` storage item. The words _term_ and +//! lasts for some number of blocks defined by [`Config::TermDuration`]. The words _term_ and //! _round_ can be used interchangeably in this context. //! -//! `TermDuration` might change during a round. This can shorten or extend the length of the round. -//! The next election round's block number is never stored but rather always checked on the fly. -//! Based on the current block number and `TermDuration`, the condition `BlockNumber % TermDuration -//! == 0` being satisfied will always trigger a new election round. +//! [`Config::TermDuration`] might change during a round. This can shorten or extend the length of +//! the round. The next election round's block number is never stored but rather always checked on +//! the fly. Based on the current block number and [`Config::TermDuration`], the condition +//! `BlockNumber % TermDuration == 0` being satisfied will always trigger a new election round. +//! +//! ### Bonds and Deposits +//! +//! Both voting and being a candidate require deposits to be taken, in exchange for the data that +//! needs to be kept on-chain. The terms *bond* and *deposit* can be used interchangeably in this +//! context. +//! +//! Bonds will be unreserved only upon adhering to the protocol laws. Failing to do so will result in +//! the bond being slashed. //! //! ### Voting //! -//! Voters can vote for any set of the candidates by providing a list of account ids. Invalid votes -//! (voting for non-candidates) are ignored during election. Yet, a voter _might_ vote for a future -//! candidate. Voters reserve a bond as they vote. Each vote defines a `value`. This amount is -//!
locked from the account of the voter and indicates the weight of the vote. Voters can update -//! their votes at any time by calling `vote()` again. This keeps the bond untouched but can -//! optionally change the locked `value`. After a round, votes are kept and might still be valid for +//! Voters can vote for a limited number of the candidates by providing a list of account ids, +//! bounded by [`MAXIMUM_VOTE`]. Invalid votes (voting for non-candidates) and duplicate votes are +//! ignored during election. Yet, a voter _might_ vote for a future candidate. Voters reserve a bond +//! as they vote. Each vote defines a `value`. This amount is locked from the account of the voter +//! and indicates the weight of the vote. Voters can update their votes at any time by calling +//! `vote()` again. This can update the vote targets (which might update the deposit) or update the +//! vote's stake ([`Voter::stake`]). After a round, votes are kept and might still be valid for //! further rounds. A voter is responsible for calling `remove_voter` once they are done to have //! their bond back and remove the lock. //! -//! Voters also report other voters as being defunct to earn their bond. A voter is defunct once all -//! of the candidates that they have voted for are neither a valid candidate anymore nor a member. -//! Upon reporting, if the target voter is actually defunct, the reporter will be rewarded by the -//! voting bond of the target. The target will lose their bond and get removed. If the target is not -//! defunct, the reporter is slashed and removed. To prevent being reported, voters should manually -//! submit a `remove_voter()` as soon as they are in the defunct state. +//! See [`Call::vote`], [`Call::remove_voter`]. +//! +//! ### Defunct Voter +//! +//! A voter is defunct once all of the candidates that they have voted for are not a valid candidate +//! (as seen further below, members and runners-up are also always candidates). Defunct voters can +//! be removed via a root call ([`Call::clean_defunct_voters`]). Upon being removed, their bond is +//! returned. This is an administrative operation and can be called only by the root origin in the +//! case of state bloat. //! //! ### Candidacy and Members //! -//! Candidates also reserve a bond as they submit candidacy. A candidate cannot take their candidacy -//! back. A candidate can end up in one of the below situations: -//! - **Winner**: A winner is kept as a _member_. They must still have a bond in reserve and they -//! are automatically counted as a candidate for the next election. +//! Candidates also reserve a bond as they submit candidacy. A candidate can end up in one of the +//! below situations: +//! - **Members**: A winner is kept as a _member_. They must still have a bond in reserve and they +//! are automatically counted as a candidate for the next election. The number of desired +//! members is set by [`Config::DesiredMembers`]. //! - **Runner-up**: Runners-up are the best candidates immediately after the winners. The number -//! of runners_up to keep is configurable. Runners-up are used, in order that they are elected, -//! as replacements when a candidate is kicked by `[remove_member]`, or when an active member -//! renounces their candidacy. Runners are automatically counted as a candidate for the next -//! election. -//! - **Loser**: Any of the candidate who are not a winner are left as losers. A loser might be an -//! _outgoing member or runner_, meaning that they are an active member who failed to keep their -//! 
spot. An outgoing will always lose their bond. +//! of runners up to keep is set by [`Config::DesiredRunnersUp`]. Runners-up are used, in the +//! same order as they are elected, as replacements when a candidate is kicked by +//! [`Call::remove_member`], or when an active member renounces their candidacy. Runners are +//! automatically counted as a candidate for the next election. +//! - **Loser**: Any of the candidate who are not member/runner-up are left as losers. A loser +//! might be an _outgoing member or runner-up_, meaning that they are an active member who +//! failed to keep their spot. **An outgoing candidate/member/runner-up will always lose their +//! bond**. //! -//! ##### Renouncing candidacy. +//! #### Renouncing candidacy. //! -//! All candidates, elected or not, can renounce their candidacy. A call to [`Module::renounce_candidacy`] -//! will always cause the candidacy bond to be refunded. +//! All candidates, elected or not, can renounce their candidacy. A call to +//! [`Call::renounce_candidacy`] will always cause the candidacy bond to be refunded. //! //! Note that with the members being the default candidates for the next round and votes persisting //! in storage, the election system is entirely stable given no further input. This means that if @@ -77,7 +92,7 @@ //! //! ### Module Information //! -//! - [`election_sp_phragmen::Trait`](./trait.Trait.html) +//! - [`election_sp_phragmen::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) @@ -90,7 +105,7 @@ use frame_support::{ ensure, storage::{IterableStorageMap, StorageMap}, traits::{ - BalanceStatus, ChangeMembers, Contains, ContainsLengthBound, Currency, CurrencyToVote, Get, + ChangeMembers, Contains, ContainsLengthBound, Currency, CurrencyToVote, Get, InitializeMembers, LockIdentifier, LockableCurrency, OnUnbalanced, ReservableCurrency, WithdrawReasons, }, @@ -102,19 +117,21 @@ use sp_runtime::{ traits::{Saturating, StaticLookup, Zero}, DispatchError, Perbill, RuntimeDebug, }; -use sp_std::prelude::*; +use sp_std::{prelude::*, cmp::Ordering}; mod benchmarking; pub mod weights; pub use weights::WeightInfo; +pub mod migrations_3_0_0; + /// The maximum votes allowed per voter. pub const MAXIMUM_VOTE: usize = 16; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; /// An indication that the renouncing account currently has which of the below roles. #[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug)] @@ -127,22 +144,35 @@ pub enum Renouncing { Candidate(#[codec(compact)] u32), } -/// Information needed to prove the defunct-ness of a voter. -#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug)] -pub struct DefunctVoter { - /// the voter's who's being challenged for being defunct +/// An active voter. +#[derive(Encode, Decode, Clone, Default, RuntimeDebug, PartialEq)] +pub struct Voter { + /// The members being backed. + pub votes: Vec, + /// The amount of stake placed on this vote. + pub stake: Balance, + /// The amount of deposit reserved for this vote. + /// + /// To be unreserved upon removal. + pub deposit: Balance, +} + +/// A holder of a seat as either a member or a runner-up. +#[derive(Encode, Decode, Clone, Default, RuntimeDebug, PartialEq)] +pub struct SeatHolder { + /// The holder. 
pub who: AccountId, - /// The number of votes that `who` has placed. - #[codec(compact)] - pub vote_count: u32, - /// The number of current active candidates. - #[codec(compact)] - pub candidate_count: u32 + /// The total backing stake. + pub stake: Balance, + /// The amount of deposit held on-chain. + /// + /// To be unreserved upon renouncing, or slashed upon being a loser. + pub deposit: Balance, } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type.c - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Identifier for the elections-phragmen pallet's lock type ModuleId: Get; @@ -165,15 +195,18 @@ pub trait Trait: frame_system::Trait { /// How much should be locked up in order to submit one's candidacy. type CandidacyBond: Get>; - /// How much should be locked up in order to be able to submit votes. - type VotingBond: Get>; + /// Base deposit associated with voting. + /// + /// This should be sensibly high to economically ensure the pallet cannot be attacked by + /// creating a gigantic number of votes. + type VotingBondBase: Get>; + + /// The amount of bond that need to be locked for each vote (32 bytes). + type VotingBondFactor: Get>; /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner-up) type LoserCandidate: OnUnbalanced>; - /// Handler for the unbalanced reduction when a reporter has submitted a bad defunct report. - type BadReport: OnUnbalanced>; - /// Handler for the unbalanced reduction when a member has been kicked. type KickedMember: OnUnbalanced>; @@ -193,53 +226,68 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as PhragmenElection { - // ---- State - /// The current elected membership. Sorted based on account id. - pub Members get(fn members): Vec<(T::AccountId, BalanceOf)>; - /// The current runners_up. Sorted based on low to high merit (worse to best). - pub RunnersUp get(fn runners_up): Vec<(T::AccountId, BalanceOf)>; + trait Store for Module as PhragmenElection { + /// The current elected members. + /// + /// Invariant: Always sorted based on account id. + pub Members get(fn members): Vec>>; + + /// The current reserved runners-up. + /// + /// Invariant: Always sorted based on rank (worse to best). Upon removal of a member, the + /// last (i.e. _best_) runner-up will be replaced. + pub RunnersUp get(fn runners_up): Vec>>; + + /// The present candidate list. A current member or runner-up can never enter this vector + /// and is always implicitly assumed to be a candidate. + /// + /// Second element is the deposit. + /// + /// Invariant: Always sorted based on account id. + pub Candidates get(fn candidates): Vec<(T::AccountId, BalanceOf)>; + /// The total number of vote rounds that have happened, excluding the upcoming one. pub ElectionRounds get(fn election_rounds): u32 = Zero::zero(); /// Votes and locked stake of a particular voter. /// - /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash - pub Voting get(fn voting): map hasher(twox_64_concat) T::AccountId => (BalanceOf, Vec); - - /// The present candidate list. Sorted based on account-id. A current member or runner-up - /// can never enter this vector and is always implicitly assumed to be a candidate. - pub Candidates get(fn candidates): Vec; + /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. 
+ pub Voting get(fn voting): map hasher(twox_64_concat) T::AccountId => Voter>; } add_extra_genesis { config(members): Vec<(T::AccountId, BalanceOf)>; build(|config: &GenesisConfig| { + assert!( + config.members.len() as u32 <= T::DesiredMembers::get(), + "Cannot accept more than DesiredMembers genesis member", + ); let members = config.members.iter().map(|(ref member, ref stake)| { - // make sure they have enough stake + // make sure they have enough stake. assert!( T::Currency::free_balance(member) >= *stake, - "Genesis member does not have enough stake", + "Genesis member does not have enough stake.", ); - // reserve candidacy bond and set as members. - T::Currency::reserve(&member, T::CandidacyBond::get()) - .expect("Genesis member does not have enough balance to be a candidate"); - // Note: all members will only vote for themselves, hence they must be given exactly // their own stake as total backing. Any sane election should behave as such. // Nonetheless, stakes will be updated for term 1 onwards according to the election. Members::::mutate(|members| { - match members.binary_search_by(|(a, _b)| a.cmp(member)) { - Ok(_) => panic!("Duplicate member in elections phragmen genesis: {}", member), - Err(pos) => members.insert(pos, (member.clone(), *stake)), + match members.binary_search_by(|m| m.who.cmp(member)) { + Ok(_) => panic!("Duplicate member in elections-phragmen genesis: {}", member), + Err(pos) => members.insert( + pos, + SeatHolder { who: member.clone(), stake: *stake, deposit: Zero::zero() }, + ), } }); - // set self-votes to make persistent. - >::vote( - T::Origin::from(Some(member.clone()).into()), - vec![member.clone()], - *stake, - ).expect("Genesis member could not vote."); + // set self-votes to make persistent. Genesis voters don't have any bond, nor do + // they have any lock. NOTE: this means that we will still try to remove a lock once + // this genesis voter is removed, and for now it is okay because remove_lock is noop + // if lock is not there. + >::insert( + &member, + Voter { votes: vec![member.clone()], stake: *stake, deposit: Zero::zero() }, + ); member.clone() }).collect::>(); @@ -251,7 +299,7 @@ decl_storage! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Cannot vote when no candidates or members exist. UnableToVote, /// Must vote for at least one candidate. @@ -273,13 +321,13 @@ decl_error! { /// Member cannot re-submit candidacy. MemberSubmit, /// Runner cannot re-submit candidacy. - RunnerSubmit, + RunnerUpSubmit, /// Candidate does not have enough funds. InsufficientCandidateFunds, /// Not a member. NotMember, /// The provided count of number of candidates is incorrect. - InvalidCandidateCount, + InvalidWitnessData, /// The provided count of number of votes is incorrect. InvalidVoteCount, /// The renouncing origin presented a wrong `Renouncing` parameter. @@ -289,46 +337,74 @@ decl_error! { } } +decl_event!( + pub enum Event where Balance = BalanceOf, ::AccountId { + /// A new term with \[new_members\]. This indicates that enough candidates existed to run the + /// election, not that enough have been elected. The inner value must be examined for + /// this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond slashed and + /// none were elected, whilst `EmptyTerm` means that no candidates existed to begin with. + NewTerm(Vec<(AccountId, Balance)>), + /// No (or not enough) candidates existed for this round. This is different from + /// `NewTerm(\[\])`. See the description of `NewTerm`.
+ EmptyTerm, + /// Internal error happened while trying to perform election. + ElectionError, + /// A \[member\] has been removed. This should always be followed by either `NewTerm` or + /// `EmptyTerm`. + MemberKicked(AccountId), + /// Someone has renounced their candidacy. + Renounced(AccountId), + /// A \[candidate\] was slashed by \[amount\] due to failing to obtain a seat as member or + /// runner-up. + /// + /// Note that old members and runners-up are also candidates. + CandidateSlashed(AccountId, Balance), + /// A \[seat holder\] was slashed by \[amount\] by being forcefully removed from the set. + SeatHolderSlashed(AccountId, Balance), + } +); + decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; - fn deposit_event() = default; const CandidacyBond: BalanceOf = T::CandidacyBond::get(); - const VotingBond: BalanceOf = T::VotingBond::get(); + const VotingBondBase: BalanceOf = T::VotingBondBase::get(); + const VotingBondFactor: BalanceOf = T::VotingBondFactor::get(); const DesiredMembers: u32 = T::DesiredMembers::get(); const DesiredRunnersUp: u32 = T::DesiredRunnersUp::get(); const TermDuration: T::BlockNumber = T::TermDuration::get(); - const ModuleId: LockIdentifier = T::ModuleId::get(); + const ModuleId: LockIdentifier = T::ModuleId::get(); /// Vote for a set of candidates for the upcoming round of election. This can be called to /// set the initial votes, or update already existing votes. /// - /// Upon initial voting, `value` units of `who`'s balance is locked and a bond amount is - /// reserved. + /// Upon initial voting, `value` units of `who`'s balance is locked and a deposit amount is + /// reserved. The deposit is based on the number of votes and can be updated over time. /// /// The `votes` should: /// - not be empty. /// - be less than the number of possible candidates. Note that all current members and /// runners-up are also automatically candidates for the next round. /// - /// It is the responsibility of the caller to not place all of their balance into the lock - /// and keep some for further transactions. + /// If `value` is more than `who`'s total balance, then the maximum of the two is used. + /// + /// The dispatch origin of this call must be signed. + /// + /// ### Warning + /// + /// It is the responsibility of the caller to **NOT** place all of their balance into the + /// lock and keep some for further operations. /// /// # - /// Base weight: 47.93 µs - /// State reads: - /// - Candidates.len() + Members.len() + RunnersUp.len() - /// - Voting (is_voter) - /// - Lock - /// - [AccountBalance(who) (unreserve + total_balance)] - /// State writes: - /// - Voting - /// - Lock - /// - [AccountBalance(who) (unreserve -- only when creating a new voter)] + /// We assume the maximum weight among all 3 cases: vote_equal, vote_more and vote_less. /// # - #[weight = T::WeightInfo::vote(votes.len() as u32)] + #[weight = + T::WeightInfo::vote_more(votes.len() as u32) + .max(T::WeightInfo::vote_less(votes.len() as u32)) + .max(T::WeightInfo::vote_equal(votes.len() as u32)) + ] fn vote( origin, votes: Vec, @@ -336,6 +412,7 @@ decl_module! { ) { let who = ensure_signed(origin)?; + // votes should not be empty and more than `MAXIMUM_VOTE` in any case. ensure!(votes.len() <= MAXIMUM_VOTE, Error::::MaximumVotesExceeded); ensure!(!votes.is_empty(), Error::::NoVotes); @@ -343,156 +420,73 @@ decl_module! 
{ let members_count = >::decode_len().unwrap_or(0); let runners_up_count = >::decode_len().unwrap_or(0); + // can never submit a vote of there are no members, and cannot submit more votes than + // all potential vote targets. // addition is valid: candidates, members and runners-up will never overlap. - let allowed_votes = candidates_count + members_count + runners_up_count; - + let allowed_votes = candidates_count + .saturating_add(members_count) + .saturating_add(runners_up_count); ensure!(!allowed_votes.is_zero(), Error::::UnableToVote); ensure!(votes.len() <= allowed_votes, Error::::TooManyVotes); ensure!(value > T::Currency::minimum_balance(), Error::::LowBalance); - // first time voter. Reserve bond. - if !Self::is_voter(&who) { - T::Currency::reserve(&who, T::VotingBond::get()) - .map_err(|_| Error::::UnableToPayBond)?; - } + // Reserve bond. + let new_deposit = Self::deposit_of(votes.len()); + let Voter { deposit: old_deposit, .. } = >::get(&who); + match new_deposit.cmp(&old_deposit) { + Ordering::Greater => { + // Must reserve a bit more. + let to_reserve = new_deposit - old_deposit; + T::Currency::reserve(&who, to_reserve).map_err(|_| Error::::UnableToPayBond)?; + }, + Ordering::Equal => {}, + Ordering::Less => { + // Must unreserve a bit. + let to_unreserve = old_deposit - new_deposit; + let _remainder = T::Currency::unreserve(&who, to_unreserve); + debug_assert!(_remainder.is_zero()); + }, + }; // Amount to be locked up. - let locked_balance = value.min(T::Currency::total_balance(&who)); - - // lock + let locked_stake = value.min(T::Currency::total_balance(&who)); T::Currency::set_lock( T::ModuleId::get(), &who, - locked_balance, - WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT), + locked_stake, + WithdrawReasons::all(), ); - Voting::::insert(&who, (locked_balance, votes)); + Voting::::insert(&who, Voter { votes, deposit: new_deposit, stake: locked_stake }); } - /// Remove `origin` as a voter. This removes the lock and returns the bond. + /// Remove `origin` as a voter. /// - /// # - /// Base weight: 36.8 µs - /// All state access is from do_remove_voter. - /// State reads: - /// - Voting - /// - [AccountData(who)] - /// State writes: - /// - Voting - /// - Locks - /// - [AccountData(who)] - /// # + /// This removes the lock and returns the deposit. + /// + /// The dispatch origin of this call must be signed and be a voter. #[weight = T::WeightInfo::remove_voter()] fn remove_voter(origin) { let who = ensure_signed(origin)?; ensure!(Self::is_voter(&who), Error::::MustBeVoter); - - Self::do_remove_voter(&who, true); + Self::do_remove_voter(&who); } - /// Report `target` for being an defunct voter. In case of a valid report, the reporter is - /// rewarded by the bond amount of `target`. Otherwise, the reporter itself is removed and - /// their bond is slashed. - /// - /// A defunct voter is defined to be: - /// - a voter whose current submitted votes are all invalid. i.e. all of them are no - /// longer a candidate nor an active member or a runner-up. + /// Submit oneself for candidacy. A fixed amount of deposit is recorded. /// + /// All candidates are wiped at the end of the term. They either become a member/runner-up, + /// or leave the system while their deposit is slashed. /// - /// The origin must provide the number of current candidates and votes of the reported target - /// for the purpose of accurate weight calculation. + /// The dispatch origin of this call must be signed. /// - /// # - /// No Base weight based on min square analysis. 
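The reworked `vote` body above sizes the reserved deposit from the number of votes and then only reserves or unreserves the difference against what is already held. A hedged sketch of that arithmetic: `deposit_of` below is a stand-in following the `VotingBondBase` plus per-vote `VotingBondFactor` shape described earlier, and all amounts are placeholders.

```rust
use std::cmp::Ordering;

// Stand-in for the pallet's deposit sizing: a base amount plus a per-vote factor,
// mirroring `VotingBondBase` and `VotingBondFactor`. Amounts are placeholders.
fn deposit_of(base: u64, factor: u64, votes: usize) -> u64 {
    base + factor * votes as u64
}

// How much extra must be reserved (positive) or can be unreserved (negative) when
// a voter's deposit changes, echoing the `Ordering` match in `vote` above.
fn reserve_delta(old_deposit: u64, new_deposit: u64) -> i128 {
    match new_deposit.cmp(&old_deposit) {
        Ordering::Greater => (new_deposit - old_deposit) as i128,
        Ordering::Equal => 0,
        Ordering::Less => -((old_deposit - new_deposit) as i128),
    }
}

fn main() {
    let (base, factor) = (20, 5); // placeholder amounts
    let old = deposit_of(base, factor, 3); // 35
    let new = deposit_of(base, factor, 5); // 45
    assert_eq!(reserve_delta(old, new), 10);  // reserve 10 more
    assert_eq!(reserve_delta(new, old), -10); // unreserve 10
}
```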
- /// Complexity of candidate_count: 1.755 µs - /// Complexity of vote_count: 18.51 µs - /// State reads: - /// - Voting(reporter) - /// - Candidate.len() - /// - Voting(Target) - /// - Candidates, Members, RunnersUp (is_defunct_voter) - /// State writes: - /// - Lock(reporter || target) - /// - [AccountBalance(reporter)] + AccountBalance(target) - /// - Voting(reporter || target) - /// Note: the db access is worse with respect to db, which is when the report is correct. - /// # - #[weight = T::WeightInfo::report_defunct_voter_correct( - defunct.candidate_count, - defunct.vote_count, - )] - fn report_defunct_voter( - origin, - defunct: DefunctVoter<::Source>, - ) -> DispatchResultWithPostInfo { - let reporter = ensure_signed(origin)?; - let target = T::Lookup::lookup(defunct.who)?; - - ensure!(reporter != target, Error::::ReportSelf); - ensure!(Self::is_voter(&reporter), Error::::MustBeVoter); - - let DefunctVoter { candidate_count, vote_count, .. } = defunct; - - ensure!( - >::decode_len().unwrap_or(0) as u32 <= candidate_count, - Error::::InvalidCandidateCount, - ); - - let (_, votes) = >::get(&target); - // indirect way to ensure target is a voter. We could call into `::contains()`, but it - // would have the same effect with one extra db access. Note that votes cannot be - // submitted with length 0. Hence, a non-zero length means that the target is a voter. - ensure!(votes.len() > 0, Error::::MustBeVoter); - - // ensure that the size of votes that need to be searched is correct. - ensure!( - votes.len() as u32 <= vote_count, - Error::::InvalidVoteCount, - ); - - let valid = Self::is_defunct_voter(&votes); - let maybe_refund = if valid { - // reporter will get the voting bond of the target - T::Currency::repatriate_reserved(&target, &reporter, T::VotingBond::get(), BalanceStatus::Free)?; - // remove the target. They are defunct. - Self::do_remove_voter(&target, false); - None - } else { - // slash the bond of the reporter. - let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0; - T::BadReport::on_unbalanced(imbalance); - // remove the reporter. - Self::do_remove_voter(&reporter, false); - Some(T::WeightInfo::report_defunct_voter_incorrect( - defunct.candidate_count, - defunct.vote_count, - )) - }; - Self::deposit_event(RawEvent::VoterReported(target, reporter, valid)); - Ok(maybe_refund.into()) - } - - /// Submit oneself for candidacy. + /// ### Warning /// - /// A candidate will either: - /// - Lose at the end of the term and forfeit their deposit. - /// - Win and become a member. Members will eventually get their stash back. - /// - Become a runner-up. Runners-ups are reserved members in case one gets forcefully - /// removed. + /// Even if a candidate ends up being a member, they must call [`Call::renounce_candidacy`] + /// to get their deposit back. Losing the spot in an election will always lead to a slash. /// /// # - /// Base weight = 33.33 µs - /// Complexity of candidate_count: 0.375 µs - /// State reads: - /// - Candidates - /// - Members - /// - RunnersUp - /// - [AccountBalance(who)] - /// State writes: - /// - [AccountBalance(who)] - /// - Candidates + /// The number of current candidates must be provided as witness data. /// # #[weight = T::WeightInfo::submit_candidacy(*candidate_count)] fn submit_candidacy(origin, #[compact] candidate_count: u32) { @@ -501,60 +495,37 @@ decl_module! 
{ let actual_count = >::decode_len().unwrap_or(0); ensure!( actual_count as u32 <= candidate_count, - Error::::InvalidCandidateCount, + Error::::InvalidWitnessData, ); - let is_candidate = Self::is_candidate(&who); - ensure!(is_candidate.is_err(), Error::::DuplicatedCandidate); - - // assured to be an error, error always contains the index. - let index = is_candidate.unwrap_err(); + let index = Self::is_candidate(&who).err().ok_or(Error::::DuplicatedCandidate)?; ensure!(!Self::is_member(&who), Error::::MemberSubmit); - ensure!(!Self::is_runner_up(&who), Error::::RunnerSubmit); + ensure!(!Self::is_runner_up(&who), Error::::RunnerUpSubmit); T::Currency::reserve(&who, T::CandidacyBond::get()) .map_err(|_| Error::::InsufficientCandidateFunds)?; - >::mutate(|c| c.insert(index, who)); + >::mutate(|c| c.insert(index, (who, T::CandidacyBond::get()))); } /// Renounce one's intention to be a candidate for the next election round. 3 potential /// outcomes exist: - /// - `origin` is a candidate and not elected in any set. In this case, the bond is + /// + /// - `origin` is a candidate and not elected in any set. In this case, the deposit is /// unreserved, returned and origin is removed as a candidate. - /// - `origin` is a current runner-up. In this case, the bond is unreserved, returned and + /// - `origin` is a current runner-up. In this case, the deposit is unreserved, returned and /// origin is removed as a runner-up. - /// - `origin` is a current member. In this case, the bond is unreserved and origin is + /// - `origin` is a current member. In this case, the deposit is unreserved and origin is /// removed as a member, consequently not being a candidate for the next round anymore. - /// Similar to [`remove_voter`], if replacement runners exists, they are immediately used. - /// - /// If a candidate is renouncing: - /// Base weight: 17.28 µs - /// Complexity of candidate_count: 0.235 µs - /// State reads: - /// - Candidates - /// - [AccountBalance(who) (unreserve)] - /// State writes: - /// - Candidates - /// - [AccountBalance(who) (unreserve)] - /// If member is renouncing: - /// Base weight: 46.25 µs - /// State reads: - /// - Members, RunnersUp (remove_and_replace_member), - /// - [AccountData(who) (unreserve)] - /// State writes: - /// - Members, RunnersUp (remove_and_replace_member), - /// - [AccountData(who) (unreserve)] - /// If runner is renouncing: - /// Base weight: 46.25 µs - /// State reads: - /// - RunnersUp (remove_and_replace_member), - /// - [AccountData(who) (unreserve)] - /// State writes: - /// - RunnersUp (remove_and_replace_member), - /// - [AccountData(who) (unreserve)] - /// + /// Similar to [`remove_members`], if replacement runners exists, they are immediately used. + /// If the prime is renouncing, then no prime will exist until the next round. + /// + /// The dispatch origin of this call must be signed, and have one of the above roles. + /// + /// # + /// The type of renouncing must be provided as witness data. + /// # #[weight = match *renouncing { Renouncing::Candidate(count) => T::WeightInfo::renounce_candidacy_candidate(count), Renouncing::Member => T::WeightInfo::renounce_candidacy_members(), @@ -564,38 +535,36 @@ decl_module! { let who = ensure_signed(origin)?; match renouncing { Renouncing::Member => { - // returns NoMember error in case of error. 
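The `submit_candidacy` body above leans on the fact that `binary_search_by` returns `Err(index)` carrying the would-be insertion point, so one lookup both rejects duplicates and keeps the candidate list sorted. A minimal, self-contained sketch of that idiom, with account ids and bonds as plain `u64` and a string in place of the pallet error:

fn submit(candidates: &mut Vec<(u64, u64)>, who: u64, bond: u64) -> Result<(), &'static str> {
    match candidates.binary_search_by(|(c, _)| c.cmp(&who)) {
        // already present: reject, mirroring the DuplicatedCandidate error
        Ok(_) => Err("DuplicatedCandidate"),
        // absent: Err carries the insertion index that keeps the vec sorted
        Err(index) => {
            candidates.insert(index, (who, bond));
            Ok(())
        }
    }
}

fn main() {
    let mut candidates: Vec<(u64, u64)> = Vec::new();
    assert!(submit(&mut candidates, 3, 3).is_ok());
    assert!(submit(&mut candidates, 1, 3).is_ok());
    assert!(submit(&mut candidates, 3, 3).is_err()); // duplicate rejected
    assert_eq!(candidates, vec![(1, 3), (3, 3)]);    // sorted by account id
}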
- let _ = Self::remove_and_replace_member(&who)?; - T::Currency::unreserve(&who, T::CandidacyBond::get()); - Self::deposit_event(RawEvent::MemberRenounced(who)); + let _ = Self::remove_and_replace_member(&who, false) + .map_err(|_| Error::::InvalidRenouncing)?; + Self::deposit_event(RawEvent::Renounced(who)); }, Renouncing::RunnerUp => { - let mut runners_up_with_stake = Self::runners_up(); - if let Some(index) = runners_up_with_stake - .iter() - .position(|(ref r, ref _s)| r == &who) - { - runners_up_with_stake.remove(index); - // unreserve the bond - T::Currency::unreserve(&who, T::CandidacyBond::get()); - // update storage. - >::put(runners_up_with_stake); - } else { - Err(Error::::InvalidRenouncing)?; - } + >::try_mutate::<_, Error, _>(|runners_up| { + let index = runners_up + .iter() + .position(|SeatHolder { who: r, .. }| r == &who) + .ok_or(Error::::InvalidRenouncing)?; + // can't fail anymore. + let SeatHolder { deposit, .. } = runners_up.remove(index); + let _remainder = T::Currency::unreserve(&who, deposit); + debug_assert!(_remainder.is_zero()); + Self::deposit_event(RawEvent::Renounced(who)); + Ok(()) + })?; } Renouncing::Candidate(count) => { - let mut candidates = Self::candidates(); - ensure!(count >= candidates.len() as u32, Error::::InvalidRenouncing); - if let Some(index) = candidates.iter().position(|x| *x == who) { - candidates.remove(index); - // unreserve the bond - T::Currency::unreserve(&who, T::CandidacyBond::get()); - // update storage. - >::put(candidates); - } else { - Err(Error::::InvalidRenouncing)?; - } + >::try_mutate::<_, Error, _>(|candidates| { + ensure!(count >= candidates.len() as u32, Error::::InvalidWitnessData); + let index = candidates + .binary_search_by(|(c, _)| c.cmp(&who)) + .map_err(|_| Error::::InvalidRenouncing)?; + let (_removed, deposit) = candidates.remove(index); + let _remainder = T::Currency::unreserve(&who, deposit); + debug_assert!(_remainder.is_zero()); + Self::deposit_event(RawEvent::Renounced(who)); + Ok(()) + })?; } }; } @@ -606,22 +575,18 @@ decl_module! { /// If a runner-up is available, then the best runner-up will be removed and replaces the /// outgoing member. Otherwise, a new phragmen election is started. /// + /// The dispatch origin of this call must be root. + /// /// Note that this does not affect the designated block number of the next election. /// /// # - /// If we have a replacement: - /// - Base weight: 50.93 µs - /// - State reads: - /// - RunnersUp.len() - /// - Members, RunnersUp (remove_and_replace_member) - /// - State writes: - /// - Members, RunnersUp (remove_and_replace_member) - /// Else, since this is a root call and will go into phragmen, we assume full block for now. + /// If we have a replacement, we use a small weight. Else, since this is a root call and + /// will go into phragmen, we assume full block for now. /// # #[weight = if *has_replacement { T::WeightInfo::remove_member_with_replacement() } else { - T::MaximumBlockWeight::get() + T::BlockWeights::get().max_block }] fn remove_member( origin, @@ -631,160 +596,196 @@ decl_module! { ensure_root(origin)?; let who = T::Lookup::lookup(who)?; - let will_have_replacement = >::decode_len().unwrap_or(0) > 0; + let will_have_replacement = >::decode_len().map_or(false, |l| l > 0); if will_have_replacement != has_replacement { - // In both cases, we will change more weight than neede. Refund and abort. + // In both cases, we will change more weight than need. Refund and abort. return Err(Error::::InvalidReplacement.with_weight( // refund. 
The weight value comes from a benchmark which is special to this. - // 5.751 µs T::WeightInfo::remove_member_wrong_refund() )); - } // else, prediction was correct. + } - Self::remove_and_replace_member(&who).map(|had_replacement| { - let (imbalance, _) = T::Currency::slash_reserved(&who, T::CandidacyBond::get()); - T::KickedMember::on_unbalanced(imbalance); - Self::deposit_event(RawEvent::MemberKicked(who.clone())); + let had_replacement = Self::remove_and_replace_member(&who, true)?; + debug_assert_eq!(has_replacement, had_replacement); + Self::deposit_event(RawEvent::MemberKicked(who.clone())); - if !had_replacement { - // if we end up here, we will charge a full block weight. - Self::do_phragmen(); - } + if !had_replacement { + Self::do_phragmen(); + } - // no refund needed. - None.into() - }).map_err(|e| e.into()) + // no refund needed. + Ok(None.into()) } - /// What to do at the end of each block. Checks if an election needs to happen or not. + /// Clean all voters who are defunct (i.e. they do not serve any purpose at all). The + /// deposit of the removed voters are returned. + /// + /// This is an root function to be used only for cleaning the state. + /// + /// The dispatch origin of this call must be root. + /// + /// # + /// The total number of voters and those that are defunct must be provided as witness data. + /// # + #[weight = T::WeightInfo::clean_defunct_voters(*_num_voters, *_num_defunct)] + fn clean_defunct_voters(origin, _num_voters: u32, _num_defunct: u32) { + let _ = ensure_root(origin)?; + >::iter() + .filter(|(_, x)| Self::is_defunct_voter(&x.votes)) + .for_each(|(dv, _)| { + Self::do_remove_voter(&dv) + }) + } + + /// What to do at the end of each block. + /// + /// Checks if an election needs to happen or not. fn on_initialize(n: T::BlockNumber) -> Weight { - // returns the correct weight. - Self::end_block(n) + let term_duration = T::TermDuration::get(); + if !term_duration.is_zero() && (n % term_duration).is_zero() { + Self::do_phragmen() + } else { + 0 + } } } } -decl_event!( - pub enum Event where - Balance = BalanceOf, - ::AccountId, - { - /// A new term with \[new_members\]. This indicates that enough candidates existed to run the - /// election, not that enough have has been elected. The inner value must be examined for - /// this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond slashed and - /// none were elected, whilst `EmptyTerm` means that no candidates existed to begin with. - NewTerm(Vec<(AccountId, Balance)>), - /// No (or not enough) candidates existed for this round. This is different from - /// `NewTerm(\[\])`. See the description of `NewTerm`. - EmptyTerm, - /// Internal error happened while trying to perform election. - ElectionError, - /// A \[member\] has been removed. This should always be followed by either `NewTerm` or - /// `EmptyTerm`. - MemberKicked(AccountId), - /// A \[member\] has renounced their candidacy. - MemberRenounced(AccountId), - /// A voter was reported with the the report being successful or not. - /// \[voter, reporter, success\] - VoterReported(AccountId, AccountId, bool), +impl Module { + /// The deposit value of `count` votes. + fn deposit_of(count: usize) -> BalanceOf { + T::VotingBondBase::get().saturating_add( + T::VotingBondFactor::get().saturating_mul((count as u32).into()) + ) } -); -impl Module { - /// Attempts to remove a member `who`. If a runner-up exists, it is used as the replacement and - /// Ok(true). is returned. + /// Attempts to remove a member `who`. 
If a runner-up exists, it is used as the replacement. /// - /// Otherwise, `Ok(false)` is returned to signal the caller. + /// Returns: /// - /// If a replacement exists, `Members` and `RunnersUp` storage is updated, where the first - /// element of `RunnersUp` is used as the replacement and `Ok(true)` is returned. Else, - /// `Ok(false)` is returned with no storage updated. + /// - `Ok(true)` if the member was removed and a replacement was found. + /// - `Ok(false)` if the member was removed and but no replacement was found. + /// - `Err(_)` if the member was no found. /// - /// Note that this function _will_ call into `T::ChangeMembers` in case any change happens - /// (`Ok(true)`). + /// Both `Members` and `RunnersUp` storage is updated accordingly. `T::ChangeMember` is called + /// if needed. If `slash` is true, the deposit of the potentially removed member is slashed, + /// else, it is unreserved. /// - /// If replacement exists, this will read and write from/into both `Members` and `RunnersUp`. - fn remove_and_replace_member(who: &T::AccountId) -> Result { - let mut members_with_stake = Self::members(); - if let Ok(index) = members_with_stake.binary_search_by(|(ref m, ref _s)| m.cmp(who)) { - members_with_stake.remove(index); - - let next_up = >::mutate(|runners_up| runners_up.pop()); - let maybe_replacement = next_up.and_then(|(replacement, stake)| - members_with_stake.binary_search_by(|(ref m, ref _s)| m.cmp(&replacement)) - .err() - .map(|index| { - members_with_stake.insert(index, (replacement.clone(), stake)); - replacement - }) - ); + /// ### Note: Prime preservation + /// + /// This function attempts to preserve the prime. If the removed members is not the prime, it is + /// set again via [`Config::ChangeMembers`]. + fn remove_and_replace_member(who: &T::AccountId, slash: bool) -> Result { + // closure will return: + // - `Ok(Option(replacement))` if member was removed and replacement was replaced. + // - `Ok(None)` if member was removed but no replacement was found + // - `Err(_)` if who is not a member. + let maybe_replacement = >::try_mutate::<_, Error, _>(|members| { + let remove_index = + members.binary_search_by(|m| m.who.cmp(who)).map_err(|_| Error::::NotMember)?; + // we remove the member anyhow, regardless of having a runner-up or not. + let removed = members.remove(remove_index); + + // slash or unreserve + if slash { + let (imbalance, _remainder) = T::Currency::slash_reserved(who, removed.deposit); + debug_assert!(_remainder.is_zero()); + T::LoserCandidate::on_unbalanced(imbalance); + Self::deposit_event(RawEvent::SeatHolderSlashed(who.clone(), removed.deposit)); + } else { + T::Currency::unreserve(who, removed.deposit); + } + + let maybe_next_best = >::mutate(|r| r.pop()).map(|next_best| { + // defensive-only: Members and runners-up are disjoint. This will always be err and + // give us an index to insert. + if let Err(index) = members.binary_search_by(|m| m.who.cmp(&next_best.who)) { + members.insert(index, next_best.clone()); + } else { + // overlap. This can never happen. If so, it seems like our intended replacement + // is already a member, so not much more to do. + frame_support::debug::error!( + "pallet-elections-phragmen: a member seems to also be a runner-up." 
+ ); + } + next_best + }); + Ok(maybe_next_best) + })?; - >::put(&members_with_stake); - let members = members_with_stake.into_iter().map(|m| m.0).collect::>(); - let result = Ok(maybe_replacement.is_some()); - let old = [who.clone()]; - match maybe_replacement { - Some(new) => T::ChangeMembers::change_members_sorted(&[new], &old, &members), - None => T::ChangeMembers::change_members_sorted(&[], &old, &members), + let remaining_member_ids_sorted = Self::members() + .into_iter() + .map(|x| x.who.clone()) + .collect::>(); + let outgoing = &[who.clone()]; + let maybe_current_prime = T::ChangeMembers::get_prime(); + let return_value = match maybe_replacement { + // member ids are already sorted, other two elements have one item. + Some(incoming) => { + T::ChangeMembers::change_members_sorted( + &[incoming.who], + outgoing, + &remaining_member_ids_sorted[..] + ); + true + } + None => { + T::ChangeMembers::change_members_sorted( + &[], + outgoing, + &remaining_member_ids_sorted[..] + ); + false + } + }; + + // if there was a prime before and they are not the one being removed, then set them + // again. + if let Some(current_prime) = maybe_current_prime { + if ¤t_prime != who { + T::ChangeMembers::set_prime(Some(current_prime)); } - result - } else { - Err(Error::::NotMember)? } + + Ok(return_value) } /// Check if `who` is a candidate. It returns the insert index if the element does not exists as /// an error. - /// - /// O(LogN) given N candidates. fn is_candidate(who: &T::AccountId) -> Result<(), usize> { - Self::candidates().binary_search(who).map(|_| ()) + Self::candidates().binary_search_by(|c| c.0.cmp(who)).map(|_| ()) } /// Check if `who` is a voter. It may or may not be a _current_ one. - /// - /// State: O(1). fn is_voter(who: &T::AccountId) -> bool { Voting::::contains_key(who) } /// Check if `who` is currently an active member. - /// - /// O(LogN) given N members. Since members are limited, O(1). fn is_member(who: &T::AccountId) -> bool { - Self::members().binary_search_by(|(a, _b)| a.cmp(who)).is_ok() + Self::members().binary_search_by(|m| m.who.cmp(who)).is_ok() } /// Check if `who` is currently an active runner-up. - /// - /// O(LogN) given N runners-up. Since runners-up are limited, O(1). fn is_runner_up(who: &T::AccountId) -> bool { - Self::runners_up().iter().position(|(a, _b)| a == who).is_some() - } - - /// Returns number of desired members. - fn desired_members() -> u32 { - T::DesiredMembers::get() - } - - /// Returns number of desired runners up. - fn desired_runners_up() -> u32 { - T::DesiredRunnersUp::get() - } - - /// Returns the term duration - fn term_duration() -> T::BlockNumber { - T::TermDuration::get() + Self::runners_up().iter().position(|r| &r.who == who).is_some() } /// Get the members' account ids. fn members_ids() -> Vec { - Self::members().into_iter().map(|(m, _)| m).collect::>() + Self::members().into_iter().map(|m| m.who).collect::>() } - /// The the runners' up account ids. - fn runners_up_ids() -> Vec { - Self::runners_up().into_iter().map(|(r, _)| r).collect::>() + /// Get a concatenation of previous members and runners-up and their deposits. + /// + /// These accounts are essentially treated as candidates. + fn implicit_candidates_with_deposit() -> Vec<(T::AccountId, BalanceOf)> { + // invariant: these two are always without duplicates. + Self::members() + .into_iter() + .map(|m| (m.who, m.deposit)) + .chain(Self::runners_up().into_iter().map(|r| (r.who, r.deposit))) + .collect::>() } /// Check if `votes` will correspond to a defunct voter. 
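For the `remove_and_replace_member` flow above, a minimal sketch with plain `u64` account ids. Members stay sorted by id, runners-up are ordered by desirability with the best at the end; slashing or unreserving the removed seat holder's deposit, the `ChangeMembers` notification and prime preservation are all elided.

fn remove_and_replace(
    members: &mut Vec<u64>,
    runners_up: &mut Vec<u64>,
    who: u64,
) -> Result<bool, &'static str> {
    let index = members.binary_search(&who).map_err(|_| "NotMember")?;
    members.remove(index);
    // promote the best runner-up, if any, keeping `members` sorted
    Ok(match runners_up.pop() {
        Some(next_best) => {
            if let Err(i) = members.binary_search(&next_best) {
                members.insert(i, next_best);
            }
            true
        }
        None => false,
    })
}

fn main() {
    let mut members = vec![2, 5, 9];
    let mut runners_up = vec![7, 4]; // 4 is the most desirable runner-up
    assert_eq!(remove_and_replace(&mut members, &mut runners_up, 5), Ok(true));
    assert_eq!(members, vec![2, 4, 9]); // seat refilled, still sorted
    assert_eq!(remove_and_replace(&mut members, &mut runners_up, 1), Err("NotMember"));
}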
As no origin is part of the inputs, @@ -801,118 +802,115 @@ impl Module { } /// Remove a certain someone as a voter. - /// - /// This will clean always clean the storage associated with the voter, and remove the balance - /// lock. Optionally, it would also return the reserved voting bond if indicated by `unreserve`. - /// If unreserve is true, has 3 storage reads and 1 reads. - /// - /// DB access: Voting and Lock are always written to, if unreserve, then 1 read and write added. - fn do_remove_voter(who: &T::AccountId, unreserve: bool) { - // remove storage and lock. - Voting::::remove(who); + fn do_remove_voter(who: &T::AccountId) { + let Voter { deposit, .. } = >::take(who); + + // remove storage, lock and unreserve. T::Currency::remove_lock(T::ModuleId::get(), who); - if unreserve { - T::Currency::unreserve(who, T::VotingBond::get()); - } - } - - /// Check there's nothing to do this block. - /// - /// Runs phragmen election and cleans all the previous candidate state. The voter state is NOT - /// cleaned and voters must themselves submit a transaction to retract. - fn end_block(block_number: T::BlockNumber) -> Weight { - if !Self::term_duration().is_zero() { - if (block_number % Self::term_duration()).is_zero() { - Self::do_phragmen(); - return T::MaximumBlockWeight::get() - } - } - 0 + // NOTE: we could check the deposit amount before removing and skip if zero, but it will be + // a noop anyhow. + let _remainder = T::Currency::unreserve(who, deposit); + debug_assert!(_remainder.is_zero()); } /// Run the phragmen election with all required side processes and state updates, if election /// succeeds. Else, it will emit an `ElectionError` event. /// /// Calls the appropriate [`ChangeMembers`] function variant internally. - /// - /// Reads: O(C + V*E) where C = candidates, V voters and E votes per voter exits. - /// Writes: O(M + R) with M desired members and R runners_up. - fn do_phragmen() { - let desired_seats = Self::desired_members() as usize; - let desired_runners_up = Self::desired_runners_up() as usize; + fn do_phragmen() -> Weight { + let desired_seats = T::DesiredMembers::get() as usize; + let desired_runners_up = T::DesiredRunnersUp::get() as usize; let num_to_elect = desired_runners_up + desired_seats; - let mut candidates = Self::candidates(); - // candidates who explicitly called `submit_candidacy`. Only these folks are at risk of - // losing their bond. - let exposed_candidates = candidates.clone(); - // current members are always a candidate for the next round as well. - // this is guaranteed to not create any duplicates. - candidates.append(&mut Self::members_ids()); - // previous runners_up are also always candidates for the next round. - candidates.append(&mut Self::runners_up_ids()); - - if candidates.len().is_zero() { + let mut candidates_and_deposit = Self::candidates(); + // add all the previous members and runners-up as candidates as well. + candidates_and_deposit.append(&mut Self::implicit_candidates_with_deposit()); + + if candidates_and_deposit.len().is_zero() { Self::deposit_event(RawEvent::EmptyTerm); - return; + return T::DbWeight::get().reads(5); } + // All of the new winners that come out of phragmen will thus have a deposit recorded. + let candidate_ids = candidates_and_deposit + .iter() + .map(|(x, _)| x) + .cloned() + .collect::>(); + // helper closures to deal with balance/stake. 
let total_issuance = T::Currency::total_issuance(); let to_votes = |b: BalanceOf| T::CurrencyToVote::to_vote(b, total_issuance); let to_balance = |e: ExtendedBalance| T::CurrencyToVote::to_currency(e, total_issuance); + let mut num_edges: u32 = 0; // used for prime election. let voters_and_stakes = Voting::::iter() - .map(|(voter, (stake, votes))| (voter, stake, votes)) + .map(|(voter, Voter { stake, votes, .. })| { (voter, stake, votes) }) .collect::>(); // used for phragmen. let voters_and_votes = voters_and_stakes.iter() .cloned() - .map(|(voter, stake, votes)| { (voter, to_votes(stake), votes)} ) + .map(|(voter, stake, votes)| { + num_edges = num_edges.saturating_add(votes.len() as u32); + (voter, to_votes(stake), votes) + }) .collect::>(); + let weight_candidates = candidates_and_deposit.len() as u32; + let weight_voters = voters_and_votes.len() as u32; + let weight_edges = num_edges; let _ = sp_npos_elections::seq_phragmen::( num_to_elect, - candidates, + candidate_ids, voters_and_votes.clone(), None, - ).map(|ElectionResult { winners, assignments: _ }| { + ).map(|ElectionResult { winners, assignments: _, }| { // this is already sorted by id. let old_members_ids_sorted = >::take().into_iter() - .map(|(m, _)| m) + .map(|m| m.who) .collect::>(); // this one needs a sort by id. let mut old_runners_up_ids_sorted = >::take().into_iter() - .map(|(r, _)| r) + .map(|r| r.who) .collect::>(); old_runners_up_ids_sorted.sort(); // filter out those who end up with no backing stake. - let new_set_with_stake = winners + let mut new_set_with_stake = winners .into_iter() .filter_map(|(m, b)| if b.is_zero() { None } else { Some((m, to_balance(b))) }) .collect::)>>(); - // OPTIMISATION NOTE: we could bail out here if `new_set.len() == 0`. There isn't much - // left to do. Yet, re-arranging the code would require duplicating the slashing of - // exposed candidates, cleaning any previous members, and so on. For now, in favour of - // readability and veracity, we keep it simple. + // OPTIMIZATION NOTE: we could bail out here if `new_set.len() == 0`. There isn't + // much left to do. Yet, re-arranging the code would require duplicating the + // slashing of exposed candidates, cleaning any previous members, and so on. For + // now, in favor of readability and veracity, we keep it simple. // split new set into winners and runners up. let split_point = desired_seats.min(new_set_with_stake.len()); - let mut new_members_sorted_by_id = (&new_set_with_stake[..split_point]).to_vec(); - - // save the runners up as-is. They are sorted based on desirability. - // save the members, sorted based on account id. + let mut new_members_sorted_by_id = new_set_with_stake.drain(..split_point).collect::>(); new_members_sorted_by_id.sort_by(|i, j| i.0.cmp(&j.0)); - // Now we select a prime member using a [Borda count](https://en.wikipedia.org/wiki/Borda_count). - // We weigh everyone's vote for that new member by a multiplier based on the order - // of the votes. i.e. the first person a voter votes for gets a 16x multiplier, - // the next person gets a 15x multiplier, an so on... 
(assuming `MAXIMUM_VOTE` = 16) - let mut prime_votes: Vec<_> = new_members_sorted_by_id.iter().map(|c| (&c.0, BalanceOf::::zero())).collect(); + // all the rest will be runners-up + new_set_with_stake.reverse(); + let new_runners_up_sorted_by_rank = new_set_with_stake; + let mut new_runners_up_ids_sorted = new_runners_up_sorted_by_rank + .iter() + .map(|(r, _)| r.clone()) + .collect::>(); + new_runners_up_ids_sorted.sort(); + + // Now we select a prime member using a [Borda + // count](https://en.wikipedia.org/wiki/Borda_count). We weigh everyone's vote for + // that new member by a multiplier based on the order of the votes. i.e. the first + // person a voter votes for gets a 16x multiplier, the next person gets a 15x + // multiplier, an so on... (assuming `MAXIMUM_VOTE` = 16) + let mut prime_votes = new_members_sorted_by_id + .iter() + .map(|c| (&c.0, BalanceOf::::zero())) + .collect::>(); for (_, stake, votes) in voters_and_stakes.into_iter() { for (vote_multiplier, who) in votes.iter() .enumerate() @@ -925,9 +923,9 @@ impl Module { } } } - // We then select the new member with the highest weighted stake. In the case of - // a tie, the last person in the list with the tied score is selected. This is - // the person with the "highest" account id based on the sort above. + // We then select the new member with the highest weighted stake. In the case of a tie, + // the last person in the list with the tied score is selected. This is the person with + // the "highest" account id based on the sort above. let prime = prime_votes.into_iter().max_by_key(|x| x.1).map(|x| x.0.clone()); // new_members_sorted_by_id is sorted by account id. @@ -936,20 +934,8 @@ impl Module { .map(|(m, _)| m.clone()) .collect::>(); - let new_runners_up_sorted_by_rank = &new_set_with_stake[split_point..] - .into_iter() - .cloned() - .rev() - .collect::)>>(); - // new_runners_up remains sorted by desirability. - let mut new_runners_up_ids_sorted = new_runners_up_sorted_by_rank - .iter() - .map(|(r, _)| r.clone()) - .collect::>(); - new_runners_up_ids_sorted.sort(); - // report member changes. We compute diff because we need the outgoing list. - let (incoming, outgoing) = T::ChangeMembers::compute_members_diff( + let (incoming, outgoing) = T::ChangeMembers::compute_members_diff_sorted( &new_members_ids_sorted, &old_members_ids_sorted, ); @@ -960,119 +946,122 @@ impl Module { ); T::ChangeMembers::set_prime(prime); - // outgoing members who are no longer a runner-up lose their bond. - let mut to_burn_bond = outgoing - .iter() - .filter(|o| new_runners_up_ids_sorted.binary_search(o).is_err()) - .cloned() - .collect::>(); - - // compute the outgoing of runners up as well and append them to the `to_burn_bond`, if - // they are not members. - { - let (_, outgoing) = T::ChangeMembers::compute_members_diff( - &new_runners_up_ids_sorted, - &old_runners_up_ids_sorted, - ); - // none of the ones computed to be outgoing must still be in the list. - debug_assert!(outgoing.iter().all(|o| !new_runners_up_ids_sorted.contains(o))); - to_burn_bond.extend( - outgoing - .iter() - .filter(|o| new_members_ids_sorted.binary_search(o).is_err()) - .cloned() - .collect::>() - ); - } - - // Burn loser bond. members list is sorted. O(NLogM) (N candidates, M members) - // runner up list is also sorted. O(NLogK) given K runner ups. Overall: O(NLogM + N*K) - // both the member and runner counts are bounded. - exposed_candidates.into_iter().for_each(|c| { - // any candidate who is not a member and not a runner up. 
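The prime selection above is a stake-weighted Borda count. A self-contained sketch follows, with `MAXIMUM_VOTE` as a stand-in for the pallet constant of the same name and plain `u64` ids and stakes; ties go to the later entry because `max_by_key` returns the last maximum, matching the "highest account id" tie-break described above.

const MAXIMUM_VOTE: usize = 16; // stand-in for the pallet's MAXIMUM_VOTE

fn prime(new_members_sorted: &[u64], voters: &[(u64, Vec<u64>)]) -> Option<u64> {
    let mut scores = vec![0u64; new_members_sorted.len()];
    for (stake, votes) in voters.iter() {
        for (position, who) in votes.iter().enumerate() {
            // first preference weighs 16x, second 15x, and so on
            let multiplier = (MAXIMUM_VOTE - position) as u64;
            if let Some(i) = new_members_sorted.iter().position(|m| m == who) {
                scores[i] += stake * multiplier;
            }
        }
    }
    // last maximum wins, i.e. the highest account id on a tie
    new_members_sorted
        .iter()
        .zip(scores)
        .max_by_key(|(_, score)| *score)
        .map(|(m, _)| *m)
}

fn main() {
    // members 4 and 5; voters given as (stake, ordered ballot)
    let voters = vec![(40u64, vec![4u64]), (50, vec![5]), (20, vec![4, 5])];
    // 4 scores (40 + 20) * 16 = 960, 5 scores 50 * 16 + 20 * 15 = 1100
    assert_eq!(prime(&[4, 5], &voters), Some(5));
}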
+ // All candidates/members/runners-up who are no longer retaining a position as a + // seat holder will lose their bond. + candidates_and_deposit.iter().for_each(|(c, d)| { if - new_members_ids_sorted.binary_search(&c).is_err() && - new_runners_up_ids_sorted.binary_search(&c).is_err() + new_members_ids_sorted.binary_search(c).is_err() && + new_runners_up_ids_sorted.binary_search(c).is_err() { - let (imbalance, _) = T::Currency::slash_reserved(&c, T::CandidacyBond::get()); + let (imbalance, _) = T::Currency::slash_reserved(c, *d); T::LoserCandidate::on_unbalanced(imbalance); + Self::deposit_event(RawEvent::CandidateSlashed(c.clone(), *d)); } }); - // Burn outgoing bonds - to_burn_bond.into_iter().for_each(|x| { - let (imbalance, _) = T::Currency::slash_reserved(&x, T::CandidacyBond::get()); - T::LoserCandidate::on_unbalanced(imbalance); - }); - - >::put(&new_members_sorted_by_id); - >::put(new_runners_up_sorted_by_rank); - - Self::deposit_event(RawEvent::NewTerm(new_members_sorted_by_id.clone().to_vec())); + // write final values to storage. + let deposit_of_candidate = |x: &T::AccountId| -> BalanceOf { + // defensive-only. This closure is used against the new members and new runners-up, + // both of which are phragmen winners and thus must have deposit. + candidates_and_deposit + .iter() + .find_map(|(c, d)| if c == x { Some(*d) } else { None }) + .unwrap_or_default() + }; + // fetch deposits from the one recorded one. This will make sure that a candidate who + // submitted candidacy before a change to candidacy deposit will have the correct amount + // recorded. + >::put( + new_members_sorted_by_id + .iter() + .map(|(who, stake)| SeatHolder { + deposit: deposit_of_candidate(&who), + who: who.clone(), + stake: stake.clone(), + }) + .collect::>(), + ); + >::put( + new_runners_up_sorted_by_rank + .into_iter() + .map(|(who, stake)| SeatHolder { + deposit: deposit_of_candidate(&who), + who, + stake, + }) + .collect::>(), + ); // clean candidates. >::kill(); + Self::deposit_event(RawEvent::NewTerm(new_members_sorted_by_id)); ElectionRounds::mutate(|v| *v += 1); }).map_err(|e| { frame_support::debug::error!("elections-phragmen: failed to run election [{:?}].", e); Self::deposit_event(RawEvent::ElectionError); }); + + T::WeightInfo::election_phragmen(weight_candidates, weight_voters, weight_edges) } } -impl Contains for Module { +impl Contains for Module { fn contains(who: &T::AccountId) -> bool { Self::is_member(who) } - fn sorted_members() -> Vec { Self::members_ids() } + + fn sorted_members() -> Vec { + Self::members_ids() + } // A special function to populate members in this pallet for passing Origin // checks in runtime benchmarking. #[cfg(feature = "runtime-benchmarks")] fn add(who: &T::AccountId) { Members::::mutate(|members| { - match members.binary_search_by(|(a, _b)| a.cmp(who)) { + match members.binary_search_by(|m| m.who.cmp(who)) { Ok(_) => (), - Err(pos) => members.insert(pos, (who.clone(), BalanceOf::::default())), + Err(pos) => members.insert(pos, SeatHolder { who: who.clone(), ..Default::default() }), } }) } } -impl ContainsLengthBound for Module { +impl ContainsLengthBound for Module { fn min_len() -> usize { 0 } /// Implementation uses a parameter type so calling is cost-free. 
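The loser-slashing step above boils down to a set difference over sorted id lists. A minimal sketch with plain `u64` ids and deposits carried alongside; the actual `slash_reserved`/`on_unbalanced` calls and the `CandidateSlashed` event are elided.

fn losers(
    candidates_and_deposit: &[(u64, u64)],
    new_members_ids_sorted: &[u64],
    new_runners_up_ids_sorted: &[u64],
) -> Vec<(u64, u64)> {
    candidates_and_deposit
        .iter()
        // keep only candidates that won neither a seat nor a runner-up slot
        .filter(|(c, _)| {
            new_members_ids_sorted.binary_search(c).is_err()
                && new_runners_up_ids_sorted.binary_search(c).is_err()
        })
        .cloned()
        .collect()
}

fn main() {
    // explicit and implicit candidates with their recorded deposits
    let candidates = vec![(1u64, 3u64), (2, 3), (3, 4), (4, 4)];
    // suppose 2 and 4 won seats and 3 became a runner-up
    assert_eq!(losers(&candidates, &[2, 4], &[3]), vec![(1, 3)]); // 1 forfeits its deposit
}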
fn max_len() -> usize { - Self::desired_members() as usize + T::DesiredMembers::get() as usize } } #[cfg(test)] mod tests { use super::*; - use std::cell::RefCell; - use frame_support::{assert_ok, assert_noop, assert_err_with_weight, parameter_types, - weights::Weight, + use frame_support::{assert_ok, assert_noop, parameter_types, + traits::OnInitialize, }; use substrate_test_utils::assert_eq_uvec; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, BuildStorage, DispatchResult, - traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, + testing::Header, BuildStorage, DispatchResult, + traits::{BlakeTwo256, IdentityLookup}, }; use crate as elections_phragmen; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -1084,26 +1073,20 @@ mod tests { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type Balance = u64; type Event = Event; type DustRemoval = (); @@ -1113,40 +1096,15 @@ mod tests { type WeightInfo = (); } - parameter_types! { - pub const CandidacyBond: u64 = 3; - } - - thread_local! { - static VOTING_BOND: RefCell = RefCell::new(2); - static DESIRED_MEMBERS: RefCell = RefCell::new(2); - static DESIRED_RUNNERS_UP: RefCell = RefCell::new(2); - static TERM_DURATION: RefCell = RefCell::new(5); - } - - pub struct VotingBond; - impl Get for VotingBond { - fn get() -> u64 { VOTING_BOND.with(|v| *v.borrow()) } - } - - pub struct DesiredMembers; - impl Get for DesiredMembers { - fn get() -> u32 { DESIRED_MEMBERS.with(|v| *v.borrow()) } - } - - pub struct DesiredRunnersUp; - impl Get for DesiredRunnersUp { - fn get() -> u32 { DESIRED_RUNNERS_UP.with(|v| *v.borrow()) } - } - - pub struct TermDuration; - impl Get for TermDuration { - fn get() -> u64 { TERM_DURATION.with(|v| *v.borrow()) } - } - - thread_local! { - pub static MEMBERS: RefCell> = RefCell::new(vec![]); - pub static PRIME: RefCell> = RefCell::new(None); + frame_support::parameter_types! 
{ + pub static VotingBondBase: u64 = 2; + pub static VotingBondFactor: u64 = 0; + pub static CandidacyBond: u64 = 3; + pub static DesiredMembers: u32 = 2; + pub static DesiredRunnersUp: u32 = 0; + pub static TermDuration: u64 = 5; + pub static Members: Vec = vec![]; + pub static Prime: Option = None; } pub struct TestChangeMembers; @@ -1187,13 +1145,17 @@ mod tests { fn set_prime(who: Option) { PRIME.with(|p| *p.borrow_mut() = who); } + + fn get_prime() -> Option { + PRIME.with(|p| *p.borrow()) + } } - parameter_types!{ + parameter_types! { pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect"; } - impl Trait for Test { + impl Config for Test { type ModuleId = ElectionsPhragmenModuleId; type Event = Event; type Currency = Balances; @@ -1201,13 +1163,13 @@ mod tests { type ChangeMembers = TestChangeMembers; type InitializeMembers = (); type CandidacyBond = CandidacyBond; - type VotingBond = VotingBond; + type VotingBondBase = VotingBondBase; + type VotingBondFactor = VotingBondFactor; type TermDuration = TermDuration; type DesiredMembers = DesiredMembers; type DesiredRunnersUp = DesiredRunnersUp; type LoserCandidate = (); type KickedMember = (); - type BadReport = (); type WeightInfo = (); } @@ -1227,61 +1189,56 @@ mod tests { ); pub struct ExtBuilder { - genesis_members: Vec<(u64, u64)>, balance_factor: u64, - voter_bond: u64, - term_duration: u64, - desired_runners_up: u32, - desired_members: u32, + genesis_members: Vec<(u64, u64)>, } impl Default for ExtBuilder { fn default() -> Self { Self { - genesis_members: vec![], balance_factor: 1, - voter_bond: 2, - term_duration: 5, - desired_runners_up: 0, - desired_members: 2, + genesis_members: vec![], } } } impl ExtBuilder { - pub fn voter_bond(mut self, fee: u64) -> Self { - self.voter_bond = fee; + pub fn voter_bond(self, bond: u64) -> Self { + VOTING_BOND_BASE.with(|v| *v.borrow_mut() = bond); + self + } + pub fn voter_bond_factor(self, bond: u64) -> Self { + VOTING_BOND_FACTOR.with(|v| *v.borrow_mut() = bond); self } - pub fn desired_runners_up(mut self, count: u32) -> Self { - self.desired_runners_up = count; + pub fn desired_runners_up(self, count: u32) -> Self { + DESIRED_RUNNERS_UP.with(|v| *v.borrow_mut() = count); self } - pub fn term_duration(mut self, duration: u64) -> Self { - self.term_duration = duration; + pub fn term_duration(self, duration: u64) -> Self { + TERM_DURATION.with(|v| *v.borrow_mut() = duration); self } pub fn genesis_members(mut self, members: Vec<(u64, u64)>) -> Self { + MEMBERS.with(|m| { + *m.borrow_mut() = members + .iter() + .map(|(m, _)| m.clone()) + .collect::>() + }); self.genesis_members = members; self } - pub fn desired_members(mut self, count: u32) -> Self { - self.desired_members = count; + pub fn desired_members(self, count: u32) -> Self { + DESIRED_MEMBERS.with(|m| *m.borrow_mut() = count); self } pub fn balance_factor(mut self, factor: u64) -> Self { self.balance_factor = factor; self } - fn set_constants(&self) { - VOTING_BOND.with(|v| *v.borrow_mut() = self.voter_bond); - TERM_DURATION.with(|v| *v.borrow_mut() = self.term_duration); - DESIRED_RUNNERS_UP.with(|v| *v.borrow_mut() = self.desired_runners_up); - DESIRED_MEMBERS.with(|m| *m.borrow_mut() = self.desired_members); - MEMBERS.with(|m| *m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>()); - } pub fn build_and_execute(self, test: impl FnOnce() -> ()) { - self.set_constants(); + MEMBERS.with(|m| *m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>()); let mut ext: 
sp_io::TestExternalities = GenesisConfig { pallet_balances: Some(pallet_balances::GenesisConfig::{ balances: vec![ @@ -1303,6 +1260,40 @@ mod tests { } } + fn candidate_ids() -> Vec { + Elections::candidates() + .into_iter() + .map(|(c, _)| c) + .collect::>() + } + + fn candidate_deposit(who: &u64) -> u64 { + Elections::candidates() + .into_iter() + .find_map(|(c, d)| if c == *who { Some(d) } else { None }) + .unwrap_or_default() + } + + fn voter_deposit(who: &u64) -> u64 { + Elections::voting(who).deposit + } + + fn runners_up_ids() -> Vec { + Elections::runners_up().into_iter().map(|r| r.who).collect::>() + } + + fn members_ids() -> Vec { + Elections::members_ids() + } + + fn members_and_stake() -> Vec<(u64, u64)> { + Elections::members().into_iter().map(|m| (m.who, m.stake)).collect::>() + } + + fn runners_up_and_stake() -> Vec<(u64, u64)> { + Elections::runners_up().into_iter().map(|r| (r.who, r.stake)).collect::>() + } + fn all_voters() -> Vec { Voting::::iter().map(|(v, _)| v).collect::>() } @@ -1312,9 +1303,15 @@ mod tests { } fn has_lock(who: &u64) -> u64 { - let lock = Balances::locks(who)[0].clone(); - assert_eq!(lock.id, ElectionsPhragmenModuleId::get()); - lock.amount + dbg!(Balances::locks(who)); + Balances::locks(who) + .get(0) + .cloned() + .map(|lock| { + assert_eq!(lock.id, ElectionsPhragmenModuleId::get()); + lock.amount + }) + .unwrap_or_default() } fn intersects(a: &[T], b: &[T]) -> bool { @@ -1323,41 +1320,41 @@ mod tests { fn ensure_members_sorted() { let mut members = Elections::members().clone(); - members.sort(); + members.sort_by_key(|m| m.who); assert_eq!(Elections::members(), members); } fn ensure_candidates_sorted() { let mut candidates = Elections::candidates().clone(); - candidates.sort(); + candidates.sort_by_key(|(c, _)| *c); assert_eq!(Elections::candidates(), candidates); } fn locked_stake_of(who: &u64) -> u64 { - Voting::::get(who).0 + Voting::::get(who).stake } fn ensure_members_has_approval_stake() { // we filter members that have no approval state. This means that even we have more seats // than candidates, we will never ever chose a member with no votes. - assert!( - Elections::members().iter().chain( - Elections::runners_up().iter() - ).all(|(_, s)| *s != u64::zero()) - ); + assert!(Elections::members() + .iter() + .chain(Elections::runners_up().iter()) + .all(|s| s.stake != u64::zero())); } fn ensure_member_candidates_runners_up_disjoint() { // members, candidates and runners-up must always be disjoint sets. 
- assert!(!intersects(&Elections::members_ids(), &Elections::candidates())); - assert!(!intersects(&Elections::members_ids(), &Elections::runners_up_ids())); - assert!(!intersects(&Elections::candidates(), &Elections::runners_up_ids())); + assert!(!intersects(&members_ids(), &candidate_ids())); + assert!(!intersects(&members_ids(), &runners_up_ids())); + assert!(!intersects(&candidate_ids(), &runners_up_ids())); } fn pre_conditions() { System::set_block_number(1); ensure_members_sorted(); ensure_candidates_sorted(); + ensure_member_candidates_runners_up_disjoint(); } fn post_conditions() { @@ -1380,28 +1377,24 @@ mod tests { } fn votes_of(who: &u64) -> Vec { - Voting::::get(who).1 - } - - fn defunct_for(who: u64) -> DefunctVoter { - DefunctVoter { - who, - candidate_count: Elections::candidates().len() as u32, - vote_count: votes_of(&who).len() as u32 - } + Voting::::get(who).votes } #[test] fn params_should_work() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::desired_members(), 2); - assert_eq!(Elections::term_duration(), 5); + assert_eq!(::DesiredMembers::get(), 2); + assert_eq!(::DesiredRunnersUp::get(), 0); + assert_eq!(::VotingBondBase::get(), 2); + assert_eq!(::VotingBondFactor::get(), 0); + assert_eq!(::CandidacyBond::get(), 3); + assert_eq!(::TermDuration::get(), 5); assert_eq!(Elections::election_rounds(), 0); assert!(Elections::members().is_empty()); assert!(Elections::runners_up().is_empty()); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); assert_eq!(>::decode_len(), None); assert!(Elections::is_candidate(&1).is_err()); @@ -1414,16 +1407,38 @@ mod tests { fn genesis_members_should_work() { ExtBuilder::default().genesis_members(vec![(1, 10), (2, 20)]).build_and_execute(|| { System::set_block_number(1); - assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]); + assert_eq!( + Elections::members(), + vec![ + SeatHolder { who: 1, stake: 10, deposit: 0 }, + SeatHolder { who: 2, stake: 20, deposit: 0 } + ] + ); - assert_eq!(Elections::voting(1), (10, vec![1])); - assert_eq!(Elections::voting(2), (20, vec![2])); + assert_eq!(Elections::voting(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 }); + assert_eq!(Elections::voting(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 }); // they will persist since they have self vote. 
System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![1, 2]); + }) + } + + #[test] + fn genesis_voters_can_remove_lock() { + ExtBuilder::default().genesis_members(vec![(1, 10), (2, 20)]).build_and_execute(|| { + System::set_block_number(1); + + assert_eq!(Elections::voting(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 }); + assert_eq!(Elections::voting(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 }); - assert_eq!(Elections::members_ids(), vec![1, 2]); + assert_ok!(Elections::remove_voter(Origin::signed(1))); + assert_ok!(Elections::remove_voter(Origin::signed(2))); + + assert_eq!(Elections::voting(1), Default::default()); + assert_eq!(Elections::voting(2), Default::default()); }) } @@ -1431,16 +1446,22 @@ mod tests { fn genesis_members_unsorted_should_work() { ExtBuilder::default().genesis_members(vec![(2, 20), (1, 10)]).build_and_execute(|| { System::set_block_number(1); - assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]); + assert_eq!( + Elections::members(), + vec![ + SeatHolder { who: 1, stake: 10, deposit: 0 }, + SeatHolder { who: 2, stake: 20, deposit: 0 }, + ] + ); - assert_eq!(Elections::voting(1), (10, vec![1])); - assert_eq!(Elections::voting(2), (20, vec![2])); + assert_eq!(Elections::voting(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 }); + assert_eq!(Elections::voting(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 }); // they will persist since they have self vote. System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![1, 2]); + assert_eq!(members_ids(), vec![1, 2]); }) } @@ -1454,20 +1475,20 @@ mod tests { } #[test] - #[should_panic] - fn genesis_members_cannot_over_stake_1() { - // 10 cannot reserve 20 as voting bond and extra genesis will panic. 
+ #[should_panic = "Duplicate member in elections-phragmen genesis: 2"] + fn genesis_members_cannot_be_duplicate() { ExtBuilder::default() - .voter_bond(20) - .genesis_members(vec![(1, 10), (2, 20)]) + .desired_members(3) + .genesis_members(vec![(1, 10), (2, 10), (2, 10)]) .build_and_execute(|| {}); } #[test] - #[should_panic = "Duplicate member in elections phragmen genesis: 2"] - fn genesis_members_cannot_be_duplicate() { + #[should_panic = "Cannot accept more than DesiredMembers genesis member"] + fn genesis_members_cannot_too_many() { ExtBuilder::default() - .genesis_members(vec![(1, 10), (2, 10), (2, 10)]) + .genesis_members(vec![(1, 10), (2, 10), (3, 30)]) + .desired_members(2) .build_and_execute(|| {}); } @@ -1477,27 +1498,27 @@ mod tests { .term_duration(0) .build_and_execute(|| { - assert_eq!(Elections::term_duration(), 0); - assert_eq!(Elections::desired_members(), 2); + assert_eq!(::TermDuration::get(), 0); + assert_eq!(::DesiredMembers::get(), 2); assert_eq!(Elections::election_rounds(), 0); - assert!(Elections::members_ids().is_empty()); + assert!(members_ids().is_empty()); assert!(Elections::runners_up().is_empty()); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert!(Elections::members_ids().is_empty()); + assert!(members_ids().is_empty()); assert!(Elections::runners_up().is_empty()); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); }); } #[test] fn simple_candidate_submission_should_work() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(candidate_ids(), Vec::::new()); assert!(Elections::is_candidate(&1).is_err()); assert!(Elections::is_candidate(&2).is_err()); @@ -1505,7 +1526,7 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(1))); assert_eq!(balances(&1), (7, 3)); - assert_eq!(Elections::candidates(), vec![1]); + assert_eq!(candidate_ids(), vec![1]); assert!(Elections::is_candidate(&1).is_ok()); assert!(Elections::is_candidate(&2).is_err()); @@ -1514,46 +1535,67 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(2))); assert_eq!(balances(&2), (17, 3)); - assert_eq!(Elections::candidates(), vec![1, 2]); + assert_eq!(candidate_ids(), vec![1, 2]); assert!(Elections::is_candidate(&1).is_ok()); assert!(Elections::is_candidate(&2).is_ok()); + + assert_eq!(candidate_deposit(&1), 3); + assert_eq!(candidate_deposit(&2), 3); + assert_eq!(candidate_deposit(&3), 0); }); } #[test] - fn simple_candidate_submission_with_no_votes_should_work() { + fn updating_candidacy_bond_works() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - - assert_ok!(submit_candidacy(Origin::signed(1))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_eq!(Elections::candidates(), vec![(5, 3)]); - assert!(Elections::is_candidate(&1).is_ok()); - assert!(Elections::is_candidate(&2).is_ok()); - assert_eq!(Elections::candidates(), vec![1, 2]); + // a runtime upgrade changes the bond. 
+ CANDIDACY_BOND.with(|v| *v.borrow_mut() = 4); - assert!(Elections::members_ids().is_empty()); - assert!(Elections::runners_up().is_empty()); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_eq!(Elections::candidates(), vec![(4, 4), (5, 3)]); + // once elected, they each hold their candidacy bond, no more. System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert!(Elections::is_candidate(&1).is_err()); - assert!(Elections::is_candidate(&2).is_err()); - assert!(Elections::candidates().is_empty()); + assert_eq!( + Elections::members(), + vec![ + SeatHolder { who: 4, stake: 40, deposit: 4 }, + SeatHolder { who: 5, stake: 50, deposit: 3 }, + ] + ); + }) + } - assert!(Elections::members_ids().is_empty()); - assert!(Elections::runners_up().is_empty()); + #[test] + fn candidates_are_always_sorted() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(candidate_ids(), Vec::::new()); + + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_eq!(candidate_ids(), vec![3]); + assert_ok!(submit_candidacy(Origin::signed(1))); + assert_eq!(candidate_ids(), vec![1, 3]); + assert_ok!(submit_candidacy(Origin::signed(2))); + assert_eq!(candidate_ids(), vec![1, 2, 3]); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_eq!(candidate_ids(), vec![1, 2, 3, 4]); }); } #[test] fn dupe_candidate_submission_should_not_work() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(candidate_ids(), Vec::::new()); assert_ok!(submit_candidacy(Origin::signed(1))); - assert_eq!(Elections::candidates(), vec![1]); + assert_eq!(candidate_ids(), vec![1]); assert_noop!( submit_candidacy(Origin::signed(1)), Error::::DuplicatedCandidate, @@ -1569,11 +1611,11 @@ mod tests { assert_ok!(vote(Origin::signed(2), vec![5], 20)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![5]); + assert_eq!(members_ids(), vec![5]); assert!(Elections::runners_up().is_empty()); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); assert_noop!( submit_candidacy(Origin::signed(5)), @@ -1593,14 +1635,14 @@ mod tests { assert_ok!(vote(Origin::signed(1), vec![3], 10)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![3]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![3]); assert_noop!( submit_candidacy(Origin::signed(3)), - Error::::RunnerSubmit, + Error::::RunnerUpSubmit, ); }); } @@ -1608,7 +1650,7 @@ mod tests { #[test] fn poor_candidate_submission_should_not_work() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(candidate_ids(), Vec::::new()); assert_noop!( submit_candidacy(Origin::signed(7)), Error::::InsufficientCandidateFunds, @@ -1619,7 +1661,7 @@ mod tests { #[test] fn simple_voting_should_work() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(candidate_ids(), Vec::::new()); assert_eq!(balances(&2), (20, 0)); assert_ok!(submit_candidacy(Origin::signed(5))); @@ -1633,7 +1675,7 @@ mod tests { #[test] fn can_vote_with_custom_stake() { 
ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(candidate_ids(), Vec::::new()); assert_eq!(balances(&2), (20, 0)); assert_ok!(submit_candidacy(Origin::signed(5))); @@ -1665,6 +1707,74 @@ mod tests { }); } + #[test] + fn updated_voting_bond_works() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + + assert_eq!(balances(&2), (20, 0)); + assert_ok!(vote(Origin::signed(2), vec![5], 5)); + assert_eq!(balances(&2), (18, 2)); + assert_eq!(voter_deposit(&2), 2); + + // a runtime upgrade lowers the voting bond to 1. This guy still un-reserves 2 when + // leaving. + VOTING_BOND_BASE.with(|v| *v.borrow_mut() = 1); + + // proof that bond changed. + assert_eq!(balances(&1), (10, 0)); + assert_ok!(vote(Origin::signed(1), vec![5], 5)); + assert_eq!(balances(&1), (9, 1)); + assert_eq!(voter_deposit(&1), 1); + + assert_ok!(Elections::remove_voter(Origin::signed(2))); + assert_eq!(balances(&2), (20, 0)); + }) + } + + #[test] + fn voting_reserves_bond_per_vote() { + ExtBuilder::default().voter_bond_factor(1).build_and_execute(|| { + assert_eq!(balances(&2), (20, 0)); + + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + + // initial vote. + assert_ok!(vote(Origin::signed(2), vec![5], 10)); + + // 2 + 1 + assert_eq!(balances(&2), (17, 3)); + assert_eq!(Elections::voting(&2).deposit, 3); + assert_eq!(has_lock(&2), 10); + assert_eq!(locked_stake_of(&2), 10); + + // can update; different stake; different lock and reserve. + assert_ok!(vote(Origin::signed(2), vec![5, 4], 15)); + // 2 + 2 + assert_eq!(balances(&2), (16, 4)); + assert_eq!(Elections::voting(&2).deposit, 4); + assert_eq!(has_lock(&2), 15); + assert_eq!(locked_stake_of(&2), 15); + + // stay at two votes with different stake. + assert_ok!(vote(Origin::signed(2), vec![5, 3], 18)); + // 2 + 2 + assert_eq!(balances(&2), (16, 4)); + assert_eq!(Elections::voting(&2).deposit, 4); + assert_eq!(has_lock(&2), 18); + assert_eq!(locked_stake_of(&2), 18); + + // back to 1 vote. 
+ assert_ok!(vote(Origin::signed(2), vec![4], 12)); + // 2 + 1 + assert_eq!(balances(&2), (17, 3)); + assert_eq!(Elections::voting(&2).deposit, 3); + assert_eq!(has_lock(&2), 12); + assert_eq!(locked_stake_of(&2), 12); + }); + } + #[test] fn cannot_vote_for_no_candidate() { ExtBuilder::default().build_and_execute(|| { @@ -1684,10 +1794,10 @@ mod tests { assert_ok!(vote(Origin::signed(2), vec![4, 5], 20)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert!(Elections::candidates().is_empty()); + assert_eq!(members_ids(), vec![4, 5]); + assert!(candidate_ids().is_empty()); assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); }); @@ -1707,10 +1817,10 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert!(Elections::candidates().is_empty()); + assert_eq!(members_ids(), vec![4, 5]); + assert!(candidate_ids().is_empty()); assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); assert_eq!(PRIME.with(|p| *p.borrow()), Some(4)); @@ -1718,28 +1828,73 @@ mod tests { } #[test] - fn prime_votes_for_exiting_members_are_removed() { + fn prime_votes_for_exiting_members_are_removed() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + + assert_ok!(vote(Origin::signed(1), vec![4, 3], 10)); + assert_ok!(vote(Origin::signed(2), vec![4], 20)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3))); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![3, 5]); + assert!(candidate_ids().is_empty()); + + assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); + }); + } + + #[test] + fn prime_is_kept_if_other_members_leave() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); + assert_ok!(Elections::renounce_candidacy( + Origin::signed(4), + Renouncing::Member + )); + + assert_eq!(members_ids(), vec![5]); + assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); + }) + } + + #[test] + fn prime_is_gone_if_renouncing() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(3))); assert_ok!(submit_candidacy(Origin::signed(4))); assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(Origin::signed(1), vec![4, 3], 10)); - assert_ok!(vote(Origin::signed(2), vec![4], 20)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); assert_ok!(vote(Origin::signed(4), vec![4], 40)); assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3))); - System::set_block_number(5); - 
Elections::end_block(System::block_number()); - - assert_eq!(Elections::members_ids(), vec![3, 5]); - assert!(Elections::candidates().is_empty()); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); - }); + assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Member)); + + assert_eq!(members_ids(), vec![4]); + assert_eq!(PRIME.with(|p| *p.borrow()), None); + }) } #[test] @@ -1765,7 +1920,7 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // now we have 2 members, 1 runner-up, and 1 new candidate assert_ok!(submit_candidacy(Origin::signed(2))); @@ -1863,172 +2018,9 @@ mod tests { assert_ok!(Elections::remove_voter(Origin::signed(4))); System::set_block_number(5); - Elections::end_block(System::block_number()); - - assert_eq!(Elections::members_ids(), vec![3, 5]); - }); - } - - #[test] - fn reporter_must_be_voter() { - ExtBuilder::default().build_and_execute(|| { - assert_noop!( - Elections::report_defunct_voter(Origin::signed(1), defunct_for(2)), - Error::::MustBeVoter, - ); - }); - } - - #[test] - fn reporter_must_provide_lengths() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - - // both are defunct. - assert_ok!(vote(Origin::signed(5), vec![99, 999, 9999], 50)); - assert_ok!(vote(Origin::signed(4), vec![999], 40)); - - // 3 candidates! incorrect candidate length. - assert_noop!( - Elections::report_defunct_voter(Origin::signed(4), DefunctVoter { - who: 5, - candidate_count: 2, - vote_count: 3, - }), - Error::::InvalidCandidateCount, - ); - - // 3 votes! incorrect vote length - assert_noop!( - Elections::report_defunct_voter(Origin::signed(4), DefunctVoter { - who: 5, - candidate_count: 3, - vote_count: 2, - }), - Error::::InvalidVoteCount, - ); - - // correct. - assert_ok!(Elections::report_defunct_voter(Origin::signed(4), DefunctVoter { - who: 5, - candidate_count: 3, - vote_count: 3, - })); - }); - } - - #[test] - fn reporter_can_overestimate_length() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - - // both are defunct. - assert_ok!(vote(Origin::signed(5), vec![99], 50)); - assert_ok!(vote(Origin::signed(4), vec![999], 40)); - - // 2 candidates! overestimation is okay. - assert_ok!(Elections::report_defunct_voter(Origin::signed(4), defunct_for(5))); - }); - } - - #[test] - fn can_detect_defunct_voter() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(6))); - - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(2), vec![4, 5], 20)); - assert_ok!(vote(Origin::signed(6), vec![6], 30)); - // will be soon a defunct voter. 
- assert_ok!(vote(Origin::signed(3), vec![3], 30)); - - System::set_block_number(5); - Elections::end_block(System::block_number()); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![6]); - assert!(Elections::candidates().is_empty()); - - // all of them have a member or runner-up that they voted for. - assert_eq!(Elections::is_defunct_voter(&votes_of(&5)), false); - assert_eq!(Elections::is_defunct_voter(&votes_of(&4)), false); - assert_eq!(Elections::is_defunct_voter(&votes_of(&2)), false); - assert_eq!(Elections::is_defunct_voter(&votes_of(&6)), false); - - // defunct - assert_eq!(Elections::is_defunct_voter(&votes_of(&3)), true); - - assert_ok!(submit_candidacy(Origin::signed(1))); - assert_ok!(vote(Origin::signed(1), vec![1], 10)); - - // has a candidate voted for. - assert_eq!(Elections::is_defunct_voter(&votes_of(&1)), false); - - }); - } - - #[test] - fn report_voter_should_work_and_earn_reward() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(2), vec![4, 5], 20)); - // will be soon a defunct voter. - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - - System::set_block_number(5); - Elections::end_block(System::block_number()); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert!(Elections::candidates().is_empty()); - - assert_eq!(balances(&3), (28, 2)); - assert_eq!(balances(&5), (45, 5)); - - assert_ok!(Elections::report_defunct_voter(Origin::signed(5), defunct_for(3))); - assert!(System::events().iter().any(|event| { - event.event == Event::elections_phragmen(RawEvent::VoterReported(3, 5, true)) - })); - - assert_eq!(balances(&3), (28, 0)); - assert_eq!(balances(&5), (47, 5)); - }); - } - - #[test] - fn report_voter_should_slash_when_bad_report() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - - System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert!(Elections::candidates().is_empty()); - - assert_eq!(balances(&4), (35, 5)); - assert_eq!(balances(&5), (45, 5)); - - assert_ok!(Elections::report_defunct_voter(Origin::signed(5), defunct_for(4))); - assert!(System::events().iter().any(|event| { - event.event == Event::elections_phragmen(RawEvent::VoterReported(4, 5, false)) - })); - - assert_eq!(balances(&4), (35, 5)); - assert_eq!(balances(&5), (45, 3)); + assert_eq!(members_ids(), vec![3, 5]); }); } @@ -2049,18 +2041,19 @@ mod tests { assert_eq!(votes_of(&3), vec![3]); assert_eq!(votes_of(&4), vec![4]); - assert_eq!(Elections::candidates(), vec![3, 4, 5]); + assert_eq!(candidate_ids(), vec![3, 4, 5]); assert_eq!(>::decode_len().unwrap(), 3); assert_eq!(Elections::election_rounds(), 0); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members(), vec![(3, 30), (5, 20)]); + assert_eq!(members_and_stake(), vec![(3, 30), (5, 20)]); assert!(Elections::runners_up().is_empty()); + assert_eq_uvec!(all_voters(), vec![2, 3, 4]); - 
assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); assert_eq!(>::decode_len(), None); assert_eq!(Elections::election_rounds(), 1); @@ -2072,7 +2065,7 @@ mod tests { ExtBuilder::default().build_and_execute(|| { // no candidates, no nothing. System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); assert_eq!( System::events().iter().last().unwrap().event, @@ -2091,21 +2084,21 @@ mod tests { assert_ok!(vote(Origin::signed(4), vec![4], 40)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); assert_eq!( System::events().iter().last().unwrap().event, Event::elections_phragmen(RawEvent::NewTerm(vec![(4, 40), (5, 50)])), ); - assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]); - assert_eq!(Elections::runners_up(), vec![]); + assert_eq!(members_and_stake(), vec![(4, 40), (5, 50)]); + assert_eq!(runners_up_and_stake(), vec![]); assert_ok!(Elections::remove_voter(Origin::signed(5))); assert_ok!(Elections::remove_voter(Origin::signed(4))); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); assert_eq!( System::events().iter().last().unwrap().event, @@ -2128,19 +2121,19 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members(), vec![(5, 50)]); + assert_eq!(members_and_stake(), vec![(5, 50)]); assert_eq!(Elections::election_rounds(), 1); // but now it has a valid target. assert_ok!(submit_candidacy(Origin::signed(4))); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // candidate 4 is affected by an old vote. - assert_eq!(Elections::members(), vec![(4, 30), (5, 50)]); + assert_eq!(members_and_stake(), vec![(4, 30), (5, 50)]); assert_eq!(Elections::election_rounds(), 2); assert_eq_uvec!(all_voters(), vec![3, 5]); }); @@ -2160,10 +2153,10 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); assert_eq!(Elections::election_rounds(), 1); - assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(members_ids(), vec![4, 5]); }); } @@ -2174,11 +2167,11 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(4))); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); assert_eq!(Elections::election_rounds(), 1); - assert!(Elections::members_ids().is_empty()); + assert!(members_ids().is_empty()); assert_eq!( System::events().iter().last().unwrap().event, @@ -2201,11 +2194,11 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![4], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // sorted based on account id. - assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(members_ids(), vec![4, 5]); // sorted based on merit (least -> most) - assert_eq!(Elections::runners_up_ids(), vec![3, 2]); + assert_eq!(runners_up_ids(), vec![3, 2]); // runner ups are still locked. 
assert_eq!(balances(&4), (35, 5)); @@ -2228,16 +2221,17 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]); - assert_eq!(Elections::runners_up(), vec![(2, 20), (3, 30)]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_and_stake(), vec![(4, 40), (5, 50)]); + assert_eq!(runners_up_and_stake(), vec![(2, 20), (3, 30)]); assert_ok!(vote(Origin::signed(5), vec![5], 15)); System::set_block_number(10); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members(), vec![(3, 30), (4, 40)]); - assert_eq!(Elections::runners_up(), vec![(5, 15), (2, 20)]); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_and_stake(), vec![(3, 30), (4, 40)]); + assert_eq!(runners_up_and_stake(), vec![(5, 15), (2, 20)]); }); } @@ -2253,18 +2247,18 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![2]); assert_eq!(balances(&2), (15, 5)); assert_ok!(submit_candidacy(Origin::signed(3))); assert_ok!(vote(Origin::signed(3), vec![3], 30)); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::runners_up_ids(), vec![3]); + assert_eq!(runners_up_ids(), vec![3]); assert_eq!(balances(&2), (15, 2)); }); } @@ -2281,22 +2275,22 @@ mod tests { assert_eq!(balances(&5), (45, 5)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![5]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![5]); assert_ok!(Elections::remove_voter(Origin::signed(5))); assert_eq!(balances(&5), (47, 3)); System::set_block_number(10); - Elections::end_block(System::block_number()); - assert!(Elections::members_ids().is_empty()); + Elections::on_initialize(System::block_number()); + assert!(members_ids().is_empty()); assert_eq!(balances(&5), (47, 0)); }); } #[test] - fn losers_will_lose_the_bond() { + fn candidates_lose_the_bond_when_outgoing() { ExtBuilder::default().build_and_execute(|| { assert_ok!(submit_candidacy(Origin::signed(5))); assert_ok!(submit_candidacy(Origin::signed(3))); @@ -2307,9 +2301,9 @@ mod tests { assert_eq!(balances(&3), (27, 3)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![5]); + assert_eq!(members_ids(), vec![5]); // winner assert_eq!(balances(&5), (47, 3)); @@ -2328,9 +2322,9 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(members_ids(), vec![4, 5]); assert_eq!(Elections::election_rounds(), 1); assert_ok!(submit_candidacy(Origin::signed(2))); @@ -2342,13 +2336,13 @@ mod tests { assert_ok!(Elections::remove_voter(Origin::signed(4))); // 5 will persist as candidates despite not being in the list. 
- assert_eq!(Elections::candidates(), vec![2, 3]); + assert_eq!(candidate_ids(), vec![2, 3]); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // 4 removed; 5 and 3 are the new best. - assert_eq!(Elections::members_ids(), vec![3, 5]); + assert_eq!(members_ids(), vec![3, 5]); }); } @@ -2369,12 +2363,12 @@ mod tests { let check_at_block = |b: u32| { System::set_block_number(b.into()); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // we keep re-electing the same folks. - assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]); - assert_eq!(Elections::runners_up(), vec![(2, 20), (3, 30)]); + assert_eq!(members_and_stake(), vec![(4, 40), (5, 50)]); + assert_eq!(runners_up_and_stake(), vec![(2, 20), (3, 30)]); // no new candidates but old members and runners-up are always added. - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); assert_eq!(Elections::election_rounds(), b / 5); assert_eq_uvec!(all_voters(), vec![2, 3, 4, 5]); }; @@ -2397,8 +2391,8 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); assert_eq!(Elections::election_rounds(), 1); // a new candidate @@ -2409,7 +2403,7 @@ mod tests { assert_eq!(balances(&4), (35, 2)); // slashed assert_eq!(Elections::election_rounds(), 2); // new election round - assert_eq!(Elections::members_ids(), vec![3, 5]); // new members + assert_eq!(members_ids(), vec![3, 5]); // new members }); } @@ -2423,14 +2417,21 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); // no replacement yet. - assert_err_with_weight!( - Elections::remove_member(Origin::root(), 4, true), - Error::::InvalidReplacement, - Some(33489000), // only thing that matters for now is that it is NOT the full block. + let unwrapped_error = Elections::remove_member(Origin::root(), 4, true).unwrap_err(); + matches!( + unwrapped_error.error, + DispatchError::Module { + message: Some("InvalidReplacement"), + .. + } + ); + matches!( + unwrapped_error.post_info.actual_weight, + Some(x) if x < ::BlockWeights::get().max_block ); }); @@ -2444,15 +2445,22 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![3]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![3]); // there is a replacement! and this one needs a weight refund. - assert_err_with_weight!( - Elections::remove_member(Origin::root(), 4, false), - Error::::InvalidReplacement, - Some(33489000) // only thing that matters for now is that it is NOT the full block. + let unwrapped_error = Elections::remove_member(Origin::root(), 4, false).unwrap_err(); + matches!( + unwrapped_error.error, + DispatchError::Module { + message: Some("InvalidReplacement"), + .. 
+ } + ); + matches!( + unwrapped_error.post_info.actual_weight, + Some(x) if x < ::BlockWeights::get().max_block ); }); } @@ -2474,8 +2482,8 @@ mod tests { assert_eq!(Elections::election_rounds(), 0); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![3, 5]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![3, 5]); assert_eq!(Elections::election_rounds(), 1); assert_ok!(Elections::remove_voter(Origin::signed(2))); @@ -2485,8 +2493,8 @@ mod tests { // meanwhile, no one cares to become a candidate again. System::set_block_number(10); - Elections::end_block(System::block_number()); - assert!(Elections::members_ids().is_empty()); + Elections::on_initialize(System::block_number()); + assert!(members_ids().is_empty()); assert_eq!(Elections::election_rounds(), 2); }); } @@ -2501,8 +2509,8 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); assert_ok!(submit_candidacy(Origin::signed(1))); assert_ok!(submit_candidacy(Origin::signed(2))); @@ -2519,10 +2527,10 @@ mod tests { assert_ok!(vote(Origin::signed(1), vec![1], 10)); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // 3, 4 are new members, must still be bonded, nothing slashed. - assert_eq!(Elections::members(), vec![(3, 30), (4, 48)]); + assert_eq!(members_and_stake(), vec![(3, 30), (4, 48)]); assert_eq!(balances(&3), (25, 5)); assert_eq!(balances(&4), (35, 5)); @@ -2549,9 +2557,9 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![10], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq_uvec!(Elections::members_ids(), vec![3, 4]); + assert_eq_uvec!(members_ids(), vec![3, 4]); assert_eq!(Elections::election_rounds(), 1); }); } @@ -2570,48 +2578,34 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![4], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // id: low -> high. - assert_eq!(Elections::members(), vec![(4, 50), (5, 40)]); + assert_eq!(members_and_stake(), vec![(4, 50), (5, 40)]); // merit: low -> high. 
- assert_eq!(Elections::runners_up(), vec![(3, 20), (2, 30)]); + assert_eq!(runners_up_and_stake(), vec![(3, 20), (2, 30)]); }); } - #[test] - fn candidates_are_sorted() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(3))); - - assert_eq!(Elections::candidates(), vec![3, 5]); - - assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::Candidate(4))); - - assert_eq!(Elections::candidates(), vec![2, 4, 5]); - }) - } - #[test] fn runner_up_replacement_maintains_members_order() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); assert_ok!(submit_candidacy(Origin::signed(2))); assert_ok!(vote(Origin::signed(2), vec![5], 20)); assert_ok!(vote(Origin::signed(4), vec![4], 40)); assert_ok!(vote(Origin::signed(5), vec![2], 50)); - System::set_block_number(5); - Elections::end_block(System::block_number()); + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![2, 4]); - assert_ok!(Elections::remove_member(Origin::root(), 2, true)); - assert_eq!(Elections::members_ids(), vec![4, 5]); - }); + assert_eq!(members_ids(), vec![2, 4]); + assert_ok!(Elections::remove_member(Origin::root(), 2, true)); + assert_eq!(members_ids(), vec![4, 5]); + }); } #[test] @@ -2628,16 +2622,16 @@ mod tests { assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![2, 3]); assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. - assert_eq!(Elections::members_ids(), vec![3, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2]); + assert_eq!(members_ids(), vec![3, 5]); + assert_eq!(runners_up_ids(), vec![2]); }) } @@ -2651,26 +2645,28 @@ mod tests { assert_ok!(vote(Origin::signed(4), vec![4], 40)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert!(Elections::runners_up_ids().is_empty()); + assert_eq!(members_ids(), vec![4, 5]); + assert!(runners_up_ids().is_empty()); assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. 
// no replacement - assert_eq!(Elections::members_ids(), vec![5]); - assert!(Elections::runners_up_ids().is_empty()); + assert_eq!(members_ids(), vec![5]); + assert!(runners_up_ids().is_empty()); }) } #[test] - fn can_renounce_candidacy_runner() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + fn can_renounce_candidacy_runner_up() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); assert_ok!(submit_candidacy(Origin::signed(2))); assert_ok!(vote(Origin::signed(5), vec![4], 50)); @@ -2678,18 +2674,21 @@ mod tests { assert_ok!(vote(Origin::signed(3), vec![3], 30)); assert_ok!(vote(Origin::signed(2), vec![2], 20)); - System::set_block_number(5); - Elections::end_block(System::block_number()); + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![2, 3]); - assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); - assert_eq!(balances(&3), (28, 2)); // 2 is voting bond. + assert_ok!(Elections::renounce_candidacy( + Origin::signed(3), + Renouncing::RunnerUp + )); + assert_eq!(balances(&3), (28, 2)); // 2 is voting bond. - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2]); - }) + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![2]); + }) } #[test] @@ -2706,13 +2705,13 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![2], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![2, 4]); - assert_eq!(Elections::runners_up_ids(), vec![5, 3]); + assert_eq!(members_ids(), vec![2, 4]); + assert_eq!(runners_up_ids(), vec![5, 3]); assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); - assert_eq!(Elections::members_ids(), vec![2, 4]); - assert_eq!(Elections::runners_up_ids(), vec![5]); + assert_eq!(members_ids(), vec![2, 4]); + assert_eq!(runners_up_ids(), vec![5]); }); } @@ -2721,11 +2720,11 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_ok!(submit_candidacy(Origin::signed(5))); assert_eq!(balances(&5), (47, 3)); - assert_eq!(Elections::candidates(), vec![5]); + assert_eq!(candidate_ids(), vec![5]); assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(1))); assert_eq!(balances(&5), (50, 0)); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); }) } @@ -2738,7 +2737,7 @@ mod tests { ); assert_noop!( Elections::renounce_candidacy(Origin::signed(5), Renouncing::Member), - Error::::NotMember, + Error::::InvalidRenouncing, ); assert_noop!( Elections::renounce_candidacy(Origin::signed(5), Renouncing::RunnerUp), @@ -2759,14 +2758,14 @@ mod tests { assert_ok!(vote(Origin::signed(3), vec![3], 30)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), 
vec![3]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![3]); assert_noop!( Elections::renounce_candidacy(Origin::signed(3), Renouncing::Member), - Error::::NotMember, + Error::::InvalidRenouncing, ); }) } @@ -2783,10 +2782,10 @@ mod tests { assert_ok!(vote(Origin::signed(3), vec![3], 30)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![3]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![3]); assert_noop!( Elections::renounce_candidacy(Origin::signed(4), Renouncing::RunnerUp), @@ -2804,7 +2803,7 @@ mod tests { assert_noop!( Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(2)), - Error::::InvalidRenouncing, + Error::::InvalidWitnessData, ); assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3))); @@ -2822,25 +2821,6 @@ mod tests { }) } - #[test] - fn behavior_with_dupe_candidate() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - >::put(vec![1, 1, 2, 3, 4]); - - assert_ok!(vote(Origin::signed(5), vec![1], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); - - System::set_block_number(5); - Elections::end_block(System::block_number()); - - assert_eq!(Elections::members_ids(), vec![1, 4]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); - assert!(Elections::candidates().is_empty()); - }) - } - #[test] fn unsorted_runners_up_are_detected() { ExtBuilder::default().desired_runners_up(2).desired_members(1).build_and_execute(|| { @@ -2848,25 +2828,24 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(4))); assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); assert_ok!(vote(Origin::signed(4), vec![4], 5)); assert_ok!(vote(Origin::signed(3), vec![3], 15)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![5]); - assert_eq!(Elections::runners_up_ids(), vec![4, 3]); + assert_eq!(members_ids(), vec![5]); + assert_eq!(runners_up_ids(), vec![4, 3]); assert_ok!(submit_candidacy(Origin::signed(2))); assert_ok!(vote(Origin::signed(2), vec![2], 10)); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![5]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); + assert_eq!(members_ids(), vec![5]); + assert_eq!(runners_up_ids(), vec![2, 3]); // 4 is outgoing runner-up. Slash candidacy bond. 
assert_eq!(balances(&4), (35, 2)); @@ -2888,10 +2867,10 @@ mod tests { assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); + assert_eq!(members_ids(), vec![4]); + assert_eq!(runners_up_ids(), vec![2, 3]); assert_eq!(balances(&4), (35, 5)); assert_eq!(balances(&3), (25, 5)); @@ -2902,10 +2881,10 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![5]); - assert_eq!(Elections::runners_up_ids(), vec![3, 4]); + assert_eq!(members_ids(), vec![5]); + assert_eq!(runners_up_ids(), vec![3, 4]); // 4 went from member to runner-up -- don't slash. assert_eq!(balances(&4), (35, 5)); @@ -2929,10 +2908,10 @@ mod tests { assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); + assert_eq!(members_ids(), vec![4]); + assert_eq!(runners_up_ids(), vec![2, 3]); assert_eq!(balances(&4), (35, 5)); assert_eq!(balances(&3), (25, 5)); @@ -2943,10 +2922,10 @@ mod tests { assert_ok!(vote(Origin::signed(2), vec![4], 20)); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![2]); - assert_eq!(Elections::runners_up_ids(), vec![4, 3]); + assert_eq!(members_ids(), vec![2]); + assert_eq!(runners_up_ids(), vec![4, 3]); // 2 went from runner to member, don't slash assert_eq!(balances(&2), (15, 5)); @@ -2956,4 +2935,166 @@ mod tests { assert_eq!(balances(&3), (25, 5)); }); } + + #[test] + fn remove_and_replace_member_works() { + let setup = || { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![3]); + }; + + // member removed, replacement found. + ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { + setup(); + assert_eq!(Elections::remove_and_replace_member(&4, false), Ok(true)); + + assert_eq!(members_ids(), vec![3, 5]); + assert_eq!(runners_up_ids().len(), 0); + }); + + // member removed, no replacement found. + ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { + setup(); + assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); + assert_eq!(Elections::remove_and_replace_member(&4, false), Ok(false)); + + assert_eq!(members_ids(), vec![5]); + assert_eq!(runners_up_ids().len(), 0); + }); + + // wrong member to remove. 
+ ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { + setup(); + assert!(matches!(Elections::remove_and_replace_member(&2, false), Err(_))); + }); + } + + #[test] + fn no_desired_members() { + // not interested in anything + ExtBuilder::default().desired_members(0).desired_runners_up(0).build_and_execute(|| { + assert_eq!(Elections::candidates().len(), 0); + + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); + + assert_eq!(Elections::candidates().len(), 3); + + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids().len(), 0); + assert_eq!(runners_up_ids().len(), 0); + assert_eq!(all_voters().len(), 3); + assert_eq!(Elections::candidates().len(), 0); + }); + + // not interested in members + ExtBuilder::default().desired_members(0).desired_runners_up(2).build_and_execute(|| { + assert_eq!(Elections::candidates().len(), 0); + + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); + + assert_eq!(Elections::candidates().len(), 3); + + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids().len(), 0); + assert_eq!(runners_up_ids(), vec![3, 4]); + assert_eq!(all_voters().len(), 3); + assert_eq!(Elections::candidates().len(), 0); + }); + + // not interested in runners-up + ExtBuilder::default().desired_members(2).desired_runners_up(0).build_and_execute(|| { + assert_eq!(Elections::candidates().len(), 0); + + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); + + assert_eq!(Elections::candidates().len(), 3); + + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![3, 4]); + assert_eq!(runners_up_ids().len(), 0); + assert_eq!(all_voters().len(), 3); + assert_eq!(Elections::candidates().len(), 0); + }); + } + + #[test] + fn dupe_vote_is_moot() { + ExtBuilder::default().desired_members(1).build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(1))); + + // all these duplicate votes will not cause 2 to win. 
+ assert_ok!(vote(Origin::signed(1), vec![2, 2, 2, 2], 5)); + assert_ok!(vote(Origin::signed(2), vec![2, 2, 2, 2], 20)); + + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![3]); + }) + } + + #[test] + fn remove_defunct_voter_works() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + + // defunct + assert_ok!(vote(Origin::signed(5), vec![5, 4], 5)); + // defunct + assert_ok!(vote(Origin::signed(4), vec![4], 5)); + // ok + assert_ok!(vote(Origin::signed(3), vec![3], 5)); + // ok + assert_ok!(vote(Origin::signed(2), vec![3, 4], 5)); + + assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(3))); + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(2))); + assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::Candidate(1))); + + assert_ok!(Elections::clean_defunct_voters(Origin::root(), 4, 2)); + }) + } } diff --git a/frame/elections-phragmen/src/migrations_3_0_0.rs b/frame/elections-phragmen/src/migrations_3_0_0.rs new file mode 100644 index 0000000000000..0737a12207c11 --- /dev/null +++ b/frame/elections-phragmen/src/migrations_3_0_0.rs @@ -0,0 +1,195 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Migrations to version [`3.0.0`], as denoted by the changelog. + +use codec::{Encode, Decode, FullCodec}; +use sp_std::prelude::*; +use frame_support::{ + RuntimeDebug, weights::Weight, Twox64Concat, + storage::types::{StorageMap, StorageValue}, + traits::{GetPalletVersion, PalletVersion}, +}; + +#[derive(Encode, Decode, Clone, Default, RuntimeDebug, PartialEq)] +struct SeatHolder { + who: AccountId, + stake: Balance, + deposit: Balance, +} + +#[derive(Encode, Decode, Clone, Default, RuntimeDebug, PartialEq)] +struct Voter { + votes: Vec, + stake: Balance, + deposit: Balance, +} + +/// Trait to implement to give information about types used for migration +pub trait V2ToV3 { + /// elections-phragmen module, used to check storage version. + type Module: GetPalletVersion; + + /// System config account id + type AccountId: 'static + FullCodec; + + /// Elections-phragmen currency balance. 
+ type Balance: 'static + FullCodec + Copy; +} + +struct __Candidates; +impl frame_support::traits::StorageInstance for __Candidates { + fn pallet_prefix() -> &'static str { "PhragmenElection" } + const STORAGE_PREFIX: &'static str = "Candidates"; +} + +#[allow(type_alias_bounds)] +type Candidates = StorageValue<__Candidates, Vec<(T::AccountId, T::Balance)>>; + +struct __Members; +impl frame_support::traits::StorageInstance for __Members { + fn pallet_prefix() -> &'static str { "PhragmenElection" } + const STORAGE_PREFIX: &'static str = "Members"; +} +#[allow(type_alias_bounds)] +type Members = StorageValue<__Members, Vec>>; + +struct __RunnersUp; +impl frame_support::traits::StorageInstance for __RunnersUp { + fn pallet_prefix() -> &'static str { "PhragmenElection" } + const STORAGE_PREFIX: &'static str = "RunnersUp"; +} +#[allow(type_alias_bounds)] +type RunnersUp = StorageValue<__RunnersUp, Vec>>; + +struct __Voting; +impl frame_support::traits::StorageInstance for __Voting { + fn pallet_prefix() -> &'static str { "PhragmenElection" } + const STORAGE_PREFIX: &'static str = "Voting"; +} +#[allow(type_alias_bounds)] +type Voting = StorageMap<__Voting, Twox64Concat, T::AccountId, Voter>; + +/// Apply all of the migrations from 2_0_0 to 3_0_0. +/// +/// ### Warning +/// +/// This code will **ONLY** check that the storage version is less than or equal to 2_0_0. +/// Further check might be needed at the user runtime. +/// +/// Be aware that this migration is intended to be used only for the mentioned versions. Use +/// with care and run at your own risk. +pub fn apply(old_voter_bond: T::Balance, old_candidacy_bond: T::Balance) -> Weight { + let maybe_storage_version = ::storage_version(); + frame_support::debug::info!( + "Running migration for elections-phragmen with storage version {:?}", + maybe_storage_version + ); + match maybe_storage_version { + Some(storage_version) if storage_version <= PalletVersion::new(2, 0, 0) => { + migrate_voters_to_recorded_deposit::(old_voter_bond); + migrate_candidates_to_recorded_deposit::(old_candidacy_bond); + migrate_runners_up_to_recorded_deposit::(old_candidacy_bond); + migrate_members_to_recorded_deposit::(old_candidacy_bond); + Weight::max_value() + } + _ => { + frame_support::debug::warn!( + "Attempted to apply migration to V3 but failed because storage version is {:?}", + maybe_storage_version + ); + 0 + }, + } +} + +/// Migrate from the old legacy voting bond (fixed) to the new one (per-vote dynamic). +pub fn migrate_voters_to_recorded_deposit(old_deposit: T::Balance) { + >::translate::<(T::Balance, Vec), _>( + |_who, (stake, votes)| { + Some(Voter { + votes, + stake, + deposit: old_deposit, + }) + }, + ); + + frame_support::debug::info!( + "migrated {} voter accounts.", + >::iter().count(), + ); +} + +/// Migrate all candidates to recorded deposit. +pub fn migrate_candidates_to_recorded_deposit(old_deposit: T::Balance) { + let _ = >::translate::, _>( + |maybe_old_candidates| { + maybe_old_candidates.map(|old_candidates| { + frame_support::debug::info!( + "migrated {} candidate accounts.", + old_candidates.len() + ); + old_candidates + .into_iter() + .map(|c| (c, old_deposit)) + .collect::>() + }) + }, + ); +} + +/// Migrate all members to recorded deposit. 
+pub fn migrate_members_to_recorded_deposit(old_deposit: T::Balance) { + let _ = >::translate::, _>( + |maybe_old_members| { + maybe_old_members.map(|old_members| { + frame_support::debug::info!("migrated {} member accounts.", old_members.len()); + old_members + .into_iter() + .map(|(who, stake)| SeatHolder { + who, + stake, + deposit: old_deposit, + }) + .collect::>() + }) + }, + ); +} + +/// Migrate all runners-up to recorded deposit. +pub fn migrate_runners_up_to_recorded_deposit(old_deposit: T::Balance) { + let _ = >::translate::, _>( + |maybe_old_runners_up| { + maybe_old_runners_up.map(|old_runners_up| { + frame_support::debug::info!( + "migrated {} runner-up accounts.", + old_runners_up.len() + ); + old_runners_up + .into_iter() + .map(|(who, stake)| SeatHolder { + who, + stake, + deposit: old_deposit, + }) + .collect::>() + }) + }, + ); +} diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index 2702aec0a01cb..25c2091408361 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_elections_phragmen -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_elections_phragmen +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.1 +//! DATE: 2021-01-20, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -43,173 +44,187 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_elections_phragmen. pub trait WeightInfo { - fn vote(_v: u32, ) -> Weight; - fn vote_update(_v: u32, ) -> Weight; + fn vote_equal(v: u32, ) -> Weight; + fn vote_more(v: u32, ) -> Weight; + fn vote_less(v: u32, ) -> Weight; fn remove_voter() -> Weight; - fn report_defunct_voter_correct(_c: u32, _v: u32, ) -> Weight; - fn report_defunct_voter_incorrect(_c: u32, _v: u32, ) -> Weight; - fn submit_candidacy(_c: u32, ) -> Weight; - fn renounce_candidacy_candidate(_c: u32, ) -> Weight; + fn submit_candidacy(c: u32, ) -> Weight; + fn renounce_candidacy_candidate(c: u32, ) -> Weight; fn renounce_candidacy_members() -> Weight; fn renounce_candidacy_runners_up() -> Weight; fn remove_member_with_replacement() -> Weight; fn remove_member_wrong_refund() -> Weight; - + fn clean_defunct_voters(v: u32, d: u32, ) -> Weight; + fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight; } /// Weights for pallet_elections_phragmen using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - fn vote(v: u32, ) -> Weight { - (89_627_000 as Weight) - .saturating_add((197_000 as Weight).saturating_mul(v as Weight)) +impl WeightInfo for SubstrateWeight { + fn vote_equal(v: u32, ) -> Weight { + (45_157_000 as Weight) + // Standard Error: 6_000 + .saturating_add((399_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn vote_update(v: u32, ) -> Weight { - (54_724_000 as Weight) - .saturating_add((213_000 as Weight).saturating_mul(v as Weight)) + fn vote_more(v: u32, ) -> Weight { + (69_738_000 as Weight) + // Standard Error: 14_000 + .saturating_add((450_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn remove_voter() -> Weight { - (73_774_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) + fn vote_less(v: u32, ) -> Weight { + (73_955_000 as Weight) + // Standard Error: 38_000 + .saturating_add((227_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn report_defunct_voter_correct(c: u32, v: u32, ) -> Weight { - (0 as Weight) - .saturating_add((1_746_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((31_383_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn report_defunct_voter_incorrect(c: u32, v: u32, ) -> Weight { - (0 as Weight) - .saturating_add((1_725_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((31_293_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) + fn remove_voter() -> Weight { + (68_398_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn submit_candidacy(c: u32, ) -> Weight { - (73_403_000 as Weight) - .saturating_add((314_000 as Weight).saturating_mul(c as Weight)) + (59_291_000 as Weight) + // Standard Error: 2_000 + .saturating_add((412_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (48_834_000 as Weight) - .saturating_add((187_000 as Weight).saturating_mul(c as Weight)) + (55_026_000 as Weight) + // Standard Error: 2_000 + .saturating_add((207_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn renounce_candidacy_members() -> Weight { - (78_402_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) + (77_840_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } fn renounce_candidacy_runners_up() -> Weight { - (49_054_000 as Weight) + (54_559_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn remove_member_with_replacement() -> Weight { - (75_421_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (84_311_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) 
.saturating_add(T::DbWeight::get().writes(5 as Weight)) - } fn remove_member_wrong_refund() -> Weight { - (8_489_000 as Weight) + (7_677_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) - } - + fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 55_000 + .saturating_add((114_815_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 53_000 + .saturating_add((49_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + } + fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 1_940_000 + .saturating_add((43_557_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 807_000 + .saturating_add((65_849_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 55_000 + .saturating_add((4_206_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) + } } // For backwards compatibility and tests impl WeightInfo for () { - fn vote(v: u32, ) -> Weight { - (89_627_000 as Weight) - .saturating_add((197_000 as Weight).saturating_mul(v as Weight)) + fn vote_equal(v: u32, ) -> Weight { + (45_157_000 as Weight) + // Standard Error: 6_000 + .saturating_add((399_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } - fn vote_update(v: u32, ) -> Weight { - (54_724_000 as Weight) - .saturating_add((213_000 as Weight).saturating_mul(v as Weight)) + fn vote_more(v: u32, ) -> Weight { + (69_738_000 as Weight) + // Standard Error: 14_000 + .saturating_add((450_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } - fn remove_voter() -> Weight { - (73_774_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + fn vote_less(v: u32, ) -> Weight { + (73_955_000 as Weight) + // Standard Error: 38_000 + .saturating_add((227_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn report_defunct_voter_correct(c: u32, v: u32, ) -> Weight { - (0 as Weight) - .saturating_add((1_746_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((31_383_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn report_defunct_voter_incorrect(c: u32, v: u32, ) -> Weight { - (0 as Weight) - .saturating_add((1_725_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((31_293_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + fn remove_voter() -> Weight { + (68_398_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn submit_candidacy(c: u32, ) -> Weight { - (73_403_000 as Weight) - .saturating_add((314_000 as Weight).saturating_mul(c as 
Weight)) + (59_291_000 as Weight) + // Standard Error: 2_000 + .saturating_add((412_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (48_834_000 as Weight) - .saturating_add((187_000 as Weight).saturating_mul(c as Weight)) + (55_026_000 as Weight) + // Standard Error: 2_000 + .saturating_add((207_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn renounce_candidacy_members() -> Weight { - (78_402_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + (77_840_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - } fn renounce_candidacy_runners_up() -> Weight { - (49_054_000 as Weight) + (54_559_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn remove_member_with_replacement() -> Weight { - (75_421_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (84_311_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) - } fn remove_member_wrong_refund() -> Weight { - (8_489_000 as Weight) + (7_677_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - } - + fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 55_000 + .saturating_add((114_815_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 53_000 + .saturating_add((49_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + } + fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 1_940_000 + .saturating_add((43_557_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 807_000 + .saturating_add((65_849_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 55_000 + .saturating_add((4_206_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) + } } diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index f0281a3033dd2..a13c6d7567f06 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-elections" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } 
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] hex-literal = "0.3.1" -pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index dccc42f24417b..6eaa2dfad3732 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -139,9 +139,9 @@ pub const VOTER_SET_SIZE: usize = 64; /// NUmber of approvals grouped in one chunk. pub const APPROVAL_SET_SIZE: usize = 8; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; /// Index used to access chunks. type SetIndex = u32; @@ -152,8 +152,8 @@ type ApprovalFlag = u32; /// Number of approval flags that can fit into [`ApprovalFlag`] type. const APPROVAL_FLAG_LEN: usize = 32; -pub trait Trait: frame_system::Trait { - type Event: From> + Into<::Event>; +pub trait Config: frame_system::Config { + type Event: From> + Into<::Event>; /// Identifier for the elections pallet's lock type ModuleId: Get; @@ -218,7 +218,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as Elections { + trait Store for Module as Elections { // ---- parameters /// How long to give each top candidate to present themselves after the vote ends. @@ -286,7 +286,7 @@ decl_storage! { decl_error! { /// Error for the elections module. - pub enum Error for Module { + pub enum Error for Module { /// Reporter must be a voter. NotVoter, /// Target for inactivity cleanup must be active. @@ -345,7 +345,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// How much should be locked up in order to submit one's candidacy. A reasonable @@ -706,7 +706,7 @@ decl_module! { } decl_event!( - pub enum Event where ::AccountId { + pub enum Event where ::AccountId { /// Reaped \[voter, reaper\]. VoterReaped(AccountId, AccountId), /// Slashed \[reaper\]. @@ -719,7 +719,7 @@ decl_event!( } ); -impl Module { +impl Module { // exposed immutables. /// True if we're currently in a presentation period. 
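// ---------------------------------------------------------------------------------------------
// Editorial aside, not part of the patch: a minimal sketch of how a downstream runtime might
// wire up the `migrations_3_0_0` module introduced earlier in this diff. The names `Elections`,
// `AccountId`, `Balance` and the two `OLD_*_BOND` constants are assumptions standing in for
// whatever the concrete runtime defines; the bond values must match whatever was configured
// under the 2.0.0 storage layout, not the placeholder figures used here.
use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};

/// Bond values that were in force before the upgrade (placeholder figures).
const OLD_VOTER_BOND: Balance = 1_000;
const OLD_CANDIDACY_BOND: Balance = 10_000;

/// One-shot upgrade handler that back-fills the newly recorded per-voter and per-candidate
/// deposits from the old flat bonds.
pub struct PhragmenElectionDepositRuntimeUpgrade;

impl pallet_elections_phragmen::migrations_3_0_0::V2ToV3 for PhragmenElectionDepositRuntimeUpgrade {
	// The pallet instance whose stored `PalletVersion` gates the migration.
	type Module = Elections;
	type AccountId = AccountId;
	type Balance = Balance;
}

impl OnRuntimeUpgrade for PhragmenElectionDepositRuntimeUpgrade {
	fn on_runtime_upgrade() -> Weight {
		// `apply` only runs when the recorded storage version is at most 2.0.0; otherwise it
		// logs a warning and returns a weight of 0.
		pallet_elections_phragmen::migrations_3_0_0::apply::<Self>(OLD_VOTER_BOND, OLD_CANDIDACY_BOND)
	}
}
// ---------------------------------------------------------------------------------------------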
diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index deec77da7b837..7c9bc9bfaf8b0 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,27 +19,27 @@ #![cfg(test)] -use std::cell::RefCell; use frame_support::{ StorageValue, StorageMap, parameter_types, assert_ok, - traits::{Get, ChangeMembers, Currency, LockIdentifier}, - weights::Weight, + traits::{ChangeMembers, Currency, LockIdentifier}, }; use sp_core::H256; use sp_runtime::{ - Perbill, BuildStorage, testing::Header, traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, + BuildStorage, testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; use crate as elections; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -51,25 +51,19 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type DustRemoval = (); @@ -85,34 +79,11 @@ parameter_types! { pub const InactiveGracePeriod: u32 = 1; pub const VotingPeriod: u64 = 4; pub const MinimumVotingLock: u64 = 5; -} - -thread_local! 
{ - static VOTER_BOND: RefCell = RefCell::new(0); - static VOTING_FEE: RefCell = RefCell::new(0); - static PRESENT_SLASH_PER_VOTER: RefCell = RefCell::new(0); - static DECAY_RATIO: RefCell = RefCell::new(0); - static MEMBERS: RefCell> = RefCell::new(vec![]); -} - -pub struct VotingBond; -impl Get for VotingBond { - fn get() -> u64 { VOTER_BOND.with(|v| *v.borrow()) } -} - -pub struct VotingFee; -impl Get for VotingFee { - fn get() -> u64 { VOTING_FEE.with(|v| *v.borrow()) } -} - -pub struct PresentSlashPerVoter; -impl Get for PresentSlashPerVoter { - fn get() -> u64 { PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow()) } -} - -pub struct DecayRatio; -impl Get for DecayRatio { - fn get() -> u32 { DECAY_RATIO.with(|v| *v.borrow()) } + pub static VotingBond: u64 = 0; + pub static VotingFee: u64 = 0; + pub static PresentSlashPerVoter: u64 = 0; + pub static DecayRatio: u32 = 0; + pub static Members: Vec = vec![]; } pub struct TestChangeMembers; @@ -134,7 +105,7 @@ parameter_types!{ pub const ElectionModuleId: LockIdentifier = *b"py/elect"; } -impl elections::Trait for Test { +impl elections::Config for Test { type Event = Event; type Currency = Balances; type BadPresentation = (); @@ -175,7 +146,7 @@ pub struct ExtBuilder { decay_ratio: u32, desired_seats: u32, voting_fee: u64, - voter_bond: u64, + voting_bond: u64, bad_presentation_punishment: u64, } @@ -186,7 +157,7 @@ impl Default for ExtBuilder { decay_ratio: 24, desired_seats: 2, voting_fee: 0, - voter_bond: 0, + voting_bond: 0, bad_presentation_punishment: 1, } } @@ -209,8 +180,8 @@ impl ExtBuilder { self.bad_presentation_punishment = fee; self } - pub fn voter_bond(mut self, fee: u64) -> Self { - self.voter_bond = fee; + pub fn voting_bond(mut self, fee: u64) -> Self { + self.voting_bond = fee; self } pub fn desired_seats(mut self, seats: u32) -> Self { @@ -218,7 +189,7 @@ impl ExtBuilder { self } pub fn build(self) -> sp_io::TestExternalities { - VOTER_BOND.with(|v| *v.borrow_mut() = self.voter_bond); + VOTING_BOND.with(|v| *v.borrow_mut() = self.voting_bond); VOTING_FEE.with(|v| *v.borrow_mut() = self.voting_fee); PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow_mut() = self.bad_presentation_punishment); DECAY_RATIO.with(|v| *v.borrow_mut() = self.decay_ratio); diff --git a/frame/elections/src/tests.rs b/frame/elections/src/tests.rs index 92f6e11252b05..62e28eb6da082 100644 --- a/frame/elections/src/tests.rs +++ b/frame/elections/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -298,7 +298,7 @@ fn voting_initial_set_approvals_ignores_voter_index() { } #[test] fn voting_bad_approval_index_slashes_voters_and_bond_reduces_stake() { - ExtBuilder::default().voting_fee(5).voter_bond(2).build().execute_with(|| { + ExtBuilder::default().voting_fee(5).voting_bond(2).build().execute_with(|| { assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); (1..=63).for_each(|i| vote(i, 0)); @@ -365,7 +365,7 @@ fn voting_cannot_lock_less_than_limit() { #[test] fn voting_locking_more_than_total_balance_is_moot() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { + ExtBuilder::default().voting_bond(2).build().execute_with(|| { assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); assert_eq!(balances(&3), (30, 0)); @@ -381,7 +381,7 @@ fn voting_locking_more_than_total_balance_is_moot() { #[test] fn voting_locking_stake_and_reserving_bond_works() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { + ExtBuilder::default().voting_bond(2).build().execute_with(|| { assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); assert_eq!(balances(&2), (20, 0)); @@ -558,7 +558,7 @@ fn retracting_inactive_voter_should_work() { #[test] fn retracting_inactive_voter_with_other_candidates_in_slots_should_work() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { + ExtBuilder::default().voting_bond(2).build().execute_with(|| { System::set_block_number(4); assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); @@ -680,8 +680,8 @@ fn retracting_active_voter_should_slash_reporter() { assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::vote_index(), 2); - assert_eq!(::InactiveGracePeriod::get(), 1); - assert_eq!(::VotingPeriod::get(), 4); + assert_eq!(::InactiveGracePeriod::get(), 1); + assert_eq!(::VotingPeriod::get(), 4); assert_eq!(Elections::voter_info(4), Some(VoterInfo { last_win: 1, last_active: 0, stake: 40, pot: 0 })); assert_ok!(Elections::reap_inactive_voter(Origin::signed(4), @@ -1107,7 +1107,7 @@ fn election_present_when_presenter_is_poor_should_not_work() { let test_present = |p| { ExtBuilder::default() .voting_fee(5) - .voter_bond(2) + .voting_bond(2) .bad_presentation_punishment(p) .build() .execute_with(|| { @@ -1507,7 +1507,7 @@ fn pot_winning_resets_accumulated_pot() { #[test] fn pot_resubmitting_approvals_stores_pot() { ExtBuilder::default() - .voter_bond(0) + .voting_bond(0) .voting_fee(0) .balance_factor(10) .build() diff --git a/frame/evm/Cargo.toml b/frame/evm/Cargo.toml deleted file mode 100644 index a228dfb566be2..0000000000000 --- a/frame/evm/Cargo.toml +++ /dev/null @@ -1,51 +0,0 @@ -[package] -name = "pallet-evm" -version = "2.0.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "Apache-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "FRAME EVM contracts pallet" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-timestamp = 
{ version = "2.0.0", default-features = false, path = "../timestamp" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -primitive-types = { version = "0.7.0", default-features = false, features = ["rlp", "byteorder"] } -rlp = { version = "0.4", default-features = false } -evm = { version = "0.17", default-features = false } -sha3 = { version = "0.8", default-features = false } -impl-trait-for-tuples = "0.1" -ripemd160 = { version = "0.9", default-features = false } - -[features] -default = ["std"] -std = [ - "serde", - "codec/std", - "sp-core/std", - "sp-runtime/std", - "frame-support/std", - "frame-system/std", - "pallet-balances/std", - "sp-io/std", - "sp-std/std", - "sha3/std", - "rlp/std", - "primitive-types/std", - "evm/std", - "pallet-timestamp/std", - "ripemd160/std", -] diff --git a/frame/evm/README.md b/frame/evm/README.md deleted file mode 100644 index f8feadbf58eb4..0000000000000 --- a/frame/evm/README.md +++ /dev/null @@ -1,3 +0,0 @@ -EVM execution module for Substrate - -License: Apache-2.0 \ No newline at end of file diff --git a/frame/evm/src/backend.rs b/frame/evm/src/backend.rs deleted file mode 100644 index b625c0c548026..0000000000000 --- a/frame/evm/src/backend.rs +++ /dev/null @@ -1,216 +0,0 @@ -use sp_std::marker::PhantomData; -use sp_std::vec::Vec; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; -use sp_core::{U256, H256, H160}; -use sp_runtime::traits::UniqueSaturatedInto; -use frame_support::traits::Get; -use frame_support::{debug, storage::{StorageMap, StorageDoubleMap}}; -use sha3::{Keccak256, Digest}; -use evm::backend::{Backend as BackendT, ApplyBackend, Apply}; -use crate::{Trait, AccountStorages, AccountCodes, Module, Event}; - -#[derive(Clone, Eq, PartialEq, Encode, Decode, Default)] -#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -/// Ethereum account nonce, balance and code. Used by storage. -pub struct Account { - /// Account nonce. - pub nonce: U256, - /// Account balance. - pub balance: U256, -} - -#[derive(Clone, Eq, PartialEq, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -/// Ethereum log. Used for `deposit_event`. -pub struct Log { - /// Source address of the log. - pub address: H160, - /// Topics of the log. - pub topics: Vec, - /// Byte array data of the log. - pub data: Vec, -} - -#[derive(Clone, Eq, PartialEq, Encode, Decode, Default)] -#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -/// External input from the transaction. -pub struct Vicinity { - /// Current transaction gas price. - pub gas_price: U256, - /// Origin of the transaction. - pub origin: H160, -} - -/// Substrate backend for EVM. -pub struct Backend<'vicinity, T> { - vicinity: &'vicinity Vicinity, - _marker: PhantomData, -} - -impl<'vicinity, T> Backend<'vicinity, T> { - /// Create a new backend with given vicinity. 
- pub fn new(vicinity: &'vicinity Vicinity) -> Self { - Self { vicinity, _marker: PhantomData } - } -} - -impl<'vicinity, T: Trait> BackendT for Backend<'vicinity, T> { - fn gas_price(&self) -> U256 { self.vicinity.gas_price } - fn origin(&self) -> H160 { self.vicinity.origin } - - fn block_hash(&self, number: U256) -> H256 { - if number > U256::from(u32::max_value()) { - H256::default() - } else { - let number = T::BlockNumber::from(number.as_u32()); - H256::from_slice(frame_system::Module::::block_hash(number).as_ref()) - } - } - - fn block_number(&self) -> U256 { - let number: u128 = frame_system::Module::::block_number().unique_saturated_into(); - U256::from(number) - } - - fn block_coinbase(&self) -> H160 { - H160::default() - } - - fn block_timestamp(&self) -> U256 { - let now: u128 = pallet_timestamp::Module::::get().unique_saturated_into(); - U256::from(now / 1000) - } - - fn block_difficulty(&self) -> U256 { - U256::zero() - } - - fn block_gas_limit(&self) -> U256 { - U256::zero() - } - - fn chain_id(&self) -> U256 { - U256::from(T::ChainId::get()) - } - - fn exists(&self, _address: H160) -> bool { - true - } - - fn basic(&self, address: H160) -> evm::backend::Basic { - let account = Module::::account_basic(&address); - - evm::backend::Basic { - balance: account.balance, - nonce: account.nonce, - } - } - - fn code_size(&self, address: H160) -> usize { - AccountCodes::decode_len(&address).unwrap_or(0) - } - - fn code_hash(&self, address: H160) -> H256 { - H256::from_slice(Keccak256::digest(&AccountCodes::get(&address)).as_slice()) - } - - fn code(&self, address: H160) -> Vec { - AccountCodes::get(&address) - } - - fn storage(&self, address: H160, index: H256) -> H256 { - AccountStorages::get(address, index) - } -} - -impl<'vicinity, T: Trait> ApplyBackend for Backend<'vicinity, T> { - fn apply( - &mut self, - values: A, - logs: L, - delete_empty: bool, - ) where - A: IntoIterator>, - I: IntoIterator, - L: IntoIterator, - { - for apply in values { - match apply { - Apply::Modify { - address, basic, code, storage, reset_storage, - } => { - Module::::mutate_account_basic(&address, Account { - nonce: basic.nonce, - balance: basic.balance, - }); - - if let Some(code) = code { - debug::debug!( - target: "evm", - "Inserting code ({} bytes) at {:?}", - code.len(), - address - ); - AccountCodes::insert(address, code); - } - - if reset_storage { - AccountStorages::remove_prefix(address); - } - - for (index, value) in storage { - if value == H256::default() { - debug::debug!( - target: "evm", - "Removing storage for {:?} [index: {:?}]", - address, - index - ); - AccountStorages::remove(address, index); - } else { - debug::debug!( - target: "evm", - "Updating storage for {:?} [index: {:?}, value: {:?}]", - address, - index, - value - ); - AccountStorages::insert(address, index, value); - } - } - - if delete_empty { - Module::::remove_account_if_empty(&address); - } - }, - Apply::Delete { address } => { - debug::debug!( - target: "evm", - "Deleting account at {:?}", - address - ); - Module::::remove_account(&address) - }, - } - } - - for log in logs { - debug::trace!( - target: "evm", - "Inserting log for {:?}, topics ({}) {:?}, data ({}): {:?}]", - log.address, - log.topics.len(), - log.topics, - log.data.len(), - log.data - ); - Module::::deposit_event(Event::::Log(Log { - address: log.address, - topics: log.topics, - data: log.data, - })); - } - } -} diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs deleted file mode 100644 index dddb71fc02a74..0000000000000 --- a/frame/evm/src/lib.rs 
+++ /dev/null @@ -1,645 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! EVM execution module for Substrate - -// Ensure we're `no_std` when compiling for Wasm. -#![cfg_attr(not(feature = "std"), no_std)] - -mod backend; -mod tests; -pub mod precompiles; - -pub use crate::precompiles::{Precompile, Precompiles}; -pub use crate::backend::{Account, Log, Vicinity, Backend}; - -use sp_std::vec::Vec; -#[cfg(feature = "std")] -use codec::{Encode, Decode}; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use frame_support::{debug, ensure, decl_module, decl_storage, decl_event, decl_error}; -use frame_support::weights::{Weight, Pays}; -use frame_support::traits::{Currency, ExistenceRequirement, Get}; -use frame_support::dispatch::DispatchResultWithPostInfo; -use frame_system::RawOrigin; -use sp_core::{U256, H256, H160, Hasher}; -use sp_runtime::{AccountId32, traits::{UniqueSaturatedInto, SaturatedConversion, BadOrigin}}; -use sha3::{Digest, Keccak256}; -pub use evm::{ExitReason, ExitSucceed, ExitError, ExitRevert, ExitFatal}; -use evm::Config; -use evm::executor::StackExecutor; -use evm::backend::ApplyBackend; - -/// Type alias for currency balance. -pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; - -/// Trait that outputs the current transaction gas price. -pub trait FeeCalculator { - /// Return the minimal required gas price. - fn min_gas_price() -> U256; -} - -impl FeeCalculator for () { - fn min_gas_price() -> U256 { U256::zero() } -} - -pub trait EnsureAddressOrigin { - /// Success return type. - type Success; - - /// Perform the origin check. - fn ensure_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result { - Self::try_address_origin(address, origin).map_err(|_| BadOrigin) - } - - /// Try with origin. - fn try_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result; -} - -/// Ensure that the EVM address is the same as the Substrate address. This only works if the account -/// ID is `H160`. -pub struct EnsureAddressSame; - -impl EnsureAddressOrigin for EnsureAddressSame where - OuterOrigin: Into, OuterOrigin>> + From>, -{ - type Success = H160; - - fn try_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result { - origin.into().and_then(|o| match o { - RawOrigin::Signed(who) if &who == address => Ok(who), - r => Err(OuterOrigin::from(r)) - }) - } -} - -/// Ensure that the origin is root. -pub struct EnsureAddressRoot(sp_std::marker::PhantomData); - -impl EnsureAddressOrigin for EnsureAddressRoot where - OuterOrigin: Into, OuterOrigin>> + From>, -{ - type Success = (); - - fn try_address_origin( - _address: &H160, - origin: OuterOrigin, - ) -> Result<(), OuterOrigin> { - origin.into().and_then(|o| match o { - RawOrigin::Root => Ok(()), - r => Err(OuterOrigin::from(r)), - }) - } -} - -/// Ensure that the origin never happens. 
-pub struct EnsureAddressNever(sp_std::marker::PhantomData); - -impl EnsureAddressOrigin for EnsureAddressNever { - type Success = AccountId; - - fn try_address_origin( - _address: &H160, - origin: OuterOrigin, - ) -> Result { - Err(origin) - } -} - -/// Ensure that the address is truncated hash of the origin. Only works if the account id is -/// `AccountId32`. -pub struct EnsureAddressTruncated; - -impl EnsureAddressOrigin for EnsureAddressTruncated where - OuterOrigin: Into, OuterOrigin>> + From>, -{ - type Success = AccountId32; - - fn try_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result { - origin.into().and_then(|o| match o { - RawOrigin::Signed(who) - if AsRef::<[u8; 32]>::as_ref(&who)[0..20] == address[0..20] => Ok(who), - r => Err(OuterOrigin::from(r)) - }) - } -} - -pub trait AddressMapping { - fn into_account_id(address: H160) -> A; -} - -/// Identity address mapping. -pub struct IdentityAddressMapping; - -impl AddressMapping for IdentityAddressMapping { - fn into_account_id(address: H160) -> H160 { address } -} - -/// Hashed address mapping. -pub struct HashedAddressMapping(sp_std::marker::PhantomData); - -impl> AddressMapping for HashedAddressMapping { - fn into_account_id(address: H160) -> AccountId32 { - let mut data = [0u8; 24]; - data[0..4].copy_from_slice(b"evm:"); - data[4..24].copy_from_slice(&address[..]); - let hash = H::hash(&data); - - AccountId32::from(Into::<[u8; 32]>::into(hash)) - } -} - -/// Substrate system chain ID. -pub struct SystemChainId; - -impl Get for SystemChainId { - fn get() -> u64 { - sp_io::misc::chain_id() - } -} - -static ISTANBUL_CONFIG: Config = Config::istanbul(); - -/// EVM module trait -pub trait Trait: frame_system::Trait + pallet_timestamp::Trait { - /// Calculator for current gas price. - type FeeCalculator: FeeCalculator; - - /// Allow the origin to call on behalf of given address. - type CallOrigin: EnsureAddressOrigin; - /// Allow the origin to withdraw on behalf of given address. - type WithdrawOrigin: EnsureAddressOrigin; - - /// Mapping from address to account id. - type AddressMapping: AddressMapping; - /// Currency type for withdraw and balance storage. - type Currency: Currency; - - /// The overarching event type. - type Event: From> + Into<::Event>; - /// Precompiles associated with this EVM engine. - type Precompiles: Precompiles; - /// Chain ID of EVM. - type ChainId: Get; - - /// EVM config used in the module. - fn config() -> &'static Config { - &ISTANBUL_CONFIG - } -} - -#[cfg(feature = "std")] -#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, Serialize, Deserialize)] -/// Account definition used for genesis block construction. -pub struct GenesisAccount { - /// Account nonce. - pub nonce: U256, - /// Account balance. - pub balance: U256, - /// Full account storage. - pub storage: std::collections::BTreeMap, - /// Account code. - pub code: Vec, -} - -decl_storage! 
{ - trait Store for Module as EVM { - AccountCodes get(fn account_codes): map hasher(blake2_128_concat) H160 => Vec; - AccountStorages get(fn account_storages): - double_map hasher(blake2_128_concat) H160, hasher(blake2_128_concat) H256 => H256; - } - - add_extra_genesis { - config(accounts): std::collections::BTreeMap; - build(|config: &GenesisConfig| { - for (address, account) in &config.accounts { - Module::::mutate_account_basic(&address, Account { - balance: account.balance, - nonce: account.nonce, - }); - AccountCodes::insert(address, &account.code); - - for (index, value) in &account.storage { - AccountStorages::insert(address, index, value); - } - } - }); - } -} - -decl_event! { - /// EVM events - pub enum Event where - ::AccountId, - { - /// Ethereum events from contracts. - Log(Log), - /// A contract has been created at given \[address\]. - Created(H160), - /// A \[contract\] was attempted to be created, but the execution failed. - CreatedFailed(H160), - /// A \[contract\] has been executed successfully with states applied. - Executed(H160), - /// A \[contract\] has been executed with errors. States are reverted with only gas fees applied. - ExecutedFailed(H160), - /// A deposit has been made at a given address. \[sender, address, value\] - BalanceDeposit(AccountId, H160, U256), - /// A withdrawal has been made from a given address. \[sender, address, value\] - BalanceWithdraw(AccountId, H160, U256), - } -} - -decl_error! { - pub enum Error for Module { - /// Not enough balance to perform action - BalanceLow, - /// Calculating total fee overflowed - FeeOverflow, - /// Calculating total payment overflowed - PaymentOverflow, - /// Withdraw fee failed - WithdrawFailed, - /// Gas price is too low. - GasPriceTooLow, - /// Nonce is invalid - InvalidNonce, - } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Withdraw balance from EVM into currency/balances module. - #[weight = 0] - fn withdraw(origin, address: H160, value: BalanceOf) { - let destination = T::WithdrawOrigin::ensure_address_origin(&address, origin)?; - let address_account_id = T::AddressMapping::into_account_id(address); - - T::Currency::transfer( - &address_account_id, - &destination, - value, - ExistenceRequirement::AllowDeath - )?; - } - - /// Issue an EVM call operation. This is similar to a message call transaction in Ethereum. - #[weight = (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight)] - fn call( - origin, - source: H160, - target: H160, - input: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResultWithPostInfo { - T::CallOrigin::ensure_address_origin(&source, origin)?; - - match Self::execute_call( - source, - target, - input, - value, - gas_limit, - gas_price, - nonce, - true, - )? { - (ExitReason::Succeed(_), _, _, _) => { - Module::::deposit_event(Event::::Executed(target)); - }, - (_, _, _, _) => { - Module::::deposit_event(Event::::ExecutedFailed(target)); - }, - } - - Ok(Pays::No.into()) - } - - /// Issue an EVM create operation. This is similar to a contract creation transaction in - /// Ethereum. 
- #[weight = (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight)] - fn create( - origin, - source: H160, - init: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResultWithPostInfo { - T::CallOrigin::ensure_address_origin(&source, origin)?; - - match Self::execute_create( - source, - init, - value, - gas_limit, - gas_price, - nonce, - true, - )? { - (ExitReason::Succeed(_), create_address, _, _) => { - Module::::deposit_event(Event::::Created(create_address)); - }, - (_, create_address, _, _) => { - Module::::deposit_event(Event::::CreatedFailed(create_address)); - }, - } - - Ok(Pays::No.into()) - } - - /// Issue an EVM create2 operation. - #[weight = (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight)] - fn create2( - origin, - source: H160, - init: Vec, - salt: H256, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResultWithPostInfo { - T::CallOrigin::ensure_address_origin(&source, origin)?; - - match Self::execute_create2( - source, - init, - salt, - value, - gas_limit, - gas_price, - nonce, - true, - )? { - (ExitReason::Succeed(_), create_address, _, _) => { - Module::::deposit_event(Event::::Created(create_address)); - }, - (_, create_address, _, _) => { - Module::::deposit_event(Event::::CreatedFailed(create_address)); - }, - } - - Ok(Pays::No.into()) - } - } -} - -impl Module { - fn remove_account(address: &H160) { - AccountCodes::remove(address); - AccountStorages::remove_prefix(address); - } - - fn mutate_account_basic(address: &H160, new: Account) { - let account_id = T::AddressMapping::into_account_id(*address); - let current = Self::account_basic(address); - - if current.nonce < new.nonce { - // ASSUME: in one single EVM transaction, the nonce will not increase more than - // `u128::max_value()`. - for _ in 0..(new.nonce - current.nonce).low_u128() { - frame_system::Module::::inc_account_nonce(&account_id); - } - } - - if current.balance > new.balance { - let diff = current.balance - new.balance; - T::Currency::slash(&account_id, diff.low_u128().unique_saturated_into()); - } else if current.balance < new.balance { - let diff = new.balance - current.balance; - T::Currency::deposit_creating(&account_id, diff.low_u128().unique_saturated_into()); - } - } - - /// Check whether an account is empty. - pub fn is_account_empty(address: &H160) -> bool { - let account = Self::account_basic(address); - let code_len = AccountCodes::decode_len(address).unwrap_or(0); - - account.nonce == U256::zero() && - account.balance == U256::zero() && - code_len == 0 - } - - /// Remove an account if its empty. - pub fn remove_account_if_empty(address: &H160) { - if Self::is_account_empty(address) { - Self::remove_account(address); - } - } - - /// Get the account basic in EVM format. - pub fn account_basic(address: &H160) -> Account { - let account_id = T::AddressMapping::into_account_id(*address); - - let nonce = frame_system::Module::::account_nonce(&account_id); - let balance = T::Currency::free_balance(&account_id); - - Account { - nonce: U256::from(UniqueSaturatedInto::::unique_saturated_into(nonce)), - balance: U256::from(UniqueSaturatedInto::::unique_saturated_into(balance)), - } - } - - /// Execute a create transaction on behalf of given sender. 
- pub fn execute_create( - source: H160, - init: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - ) -> Result<(ExitReason, H160, U256, Vec), Error> { - Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - apply_state, - |executor| { - let address = executor.create_address( - evm::CreateScheme::Legacy { caller: source }, - ); - (executor.transact_create( - source, - value, - init, - gas_limit as usize, - ), address) - }, - ) - } - - /// Execute a create2 transaction on behalf of a given sender. - pub fn execute_create2( - source: H160, - init: Vec, - salt: H256, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - ) -> Result<(ExitReason, H160, U256, Vec), Error> { - let code_hash = H256::from_slice(Keccak256::digest(&init).as_slice()); - Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - apply_state, - |executor| { - let address = executor.create_address( - evm::CreateScheme::Create2 { caller: source, code_hash, salt }, - ); - (executor.transact_create2( - source, - value, - init, - salt, - gas_limit as usize, - ), address) - }, - ) - } - - /// Execute a call transaction on behalf of a given sender. - pub fn execute_call( - source: H160, - target: H160, - input: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - ) -> Result<(ExitReason, Vec, U256, Vec), Error> { - Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - apply_state, - |executor| executor.transact_call( - source, - target, - value, - input, - gas_limit as usize, - ), - ) - } - - /// Execute an EVM operation. - fn execute_evm( - source: H160, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - f: F, - ) -> Result<(ExitReason, R, U256, Vec), Error> where - F: FnOnce(&mut StackExecutor>) -> (ExitReason, R), - { - - // Gas price check is skipped when performing a gas estimation. 
- if apply_state { - ensure!(gas_price >= T::FeeCalculator::min_gas_price(), Error::::GasPriceTooLow); - } - - let vicinity = Vicinity { - gas_price, - origin: source, - }; - - let mut backend = Backend::::new(&vicinity); - let mut executor = StackExecutor::new_with_precompile( - &backend, - gas_limit as usize, - T::config(), - T::Precompiles::execute, - ); - - let total_fee = gas_price.checked_mul(U256::from(gas_limit)) - .ok_or(Error::::FeeOverflow)?; - let total_payment = value.checked_add(total_fee).ok_or(Error::::PaymentOverflow)?; - let source_account = Self::account_basic(&source); - ensure!(source_account.balance >= total_payment, Error::::BalanceLow); - executor.withdraw(source, total_fee).map_err(|_| Error::::WithdrawFailed)?; - - if let Some(nonce) = nonce { - ensure!(source_account.nonce == nonce, Error::::InvalidNonce); - } - - let (retv, reason) = f(&mut executor); - - let used_gas = U256::from(executor.used_gas()); - let actual_fee = executor.fee(gas_price); - debug::debug!( - target: "evm", - "Execution {:?} [source: {:?}, value: {}, gas_limit: {}, used_gas: {}, actual_fee: {}]", - retv, - source, - value, - gas_limit, - used_gas, - actual_fee - ); - executor.deposit(source, total_fee.saturating_sub(actual_fee)); - - let (values, logs) = executor.deconstruct(); - let logs_data = logs.into_iter().map(|x| x ).collect::>(); - let logs_result = logs_data.clone().into_iter().map(|it| { - Log { - address: it.address, - topics: it.topics, - data: it.data - } - }).collect(); - if apply_state { - backend.apply(values, logs_data, true); - } - - Ok((retv, reason, used_gas, logs_result)) - } -} diff --git a/frame/evm/src/precompiles.rs b/frame/evm/src/precompiles.rs deleted file mode 100644 index 440d9bf1c68c2..0000000000000 --- a/frame/evm/src/precompiles.rs +++ /dev/null @@ -1,167 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Builtin precompiles. - -use sp_std::{cmp::min, vec::Vec}; -use sp_core::H160; -use evm::{ExitError, ExitSucceed}; -use ripemd160::Digest; -use impl_trait_for_tuples::impl_for_tuples; - -/// Custom precompiles to be used by EVM engine. -pub trait Precompiles { - /// Try to execute the code address as precompile. If the code address is not - /// a precompile or the precompile is not yet available, return `None`. - /// Otherwise, calculate the amount of gas needed with given `input` and - /// `target_gas`. Return `Some(Ok(status, output, gas_used))` if the execution - /// is successful. Otherwise return `Some(Err(_))`. - fn execute( - address: H160, - input: &[u8], - target_gas: Option, - ) -> Option, usize), ExitError>>; -} - -/// One single precompile used by EVM engine. -pub trait Precompile { - /// Try to execute the precompile. Calculate the amount of gas needed with given `input` and - /// `target_gas`. Return `Ok(status, output, gas_used)` if the execution is - /// successful. 
Otherwise return `Err(_)`. - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError>; -} - -#[impl_for_tuples(16)] -#[tuple_types_no_default_trait_bound] -impl Precompiles for Tuple { - for_tuples!( where #( Tuple: Precompile )* ); - - fn execute( - address: H160, - input: &[u8], - target_gas: Option, - ) -> Option, usize), ExitError>> { - let mut index = 0; - - for_tuples!( #( - index += 1; - if address == H160::from_low_u64_be(index) { - return Some(Tuple::execute(input, target_gas)) - } - )* ); - - None - } -} - -/// Linear gas cost -fn ensure_linear_cost( - target_gas: Option, - len: usize, - base: usize, - word: usize -) -> Result { - let cost = base.checked_add( - word.checked_mul(len.saturating_add(31) / 32).ok_or(ExitError::OutOfGas)? - ).ok_or(ExitError::OutOfGas)?; - - if let Some(target_gas) = target_gas { - if cost > target_gas { - return Err(ExitError::OutOfGas) - } - } - - Ok(cost) -} - -/// The identity precompile. -pub struct Identity; - -impl Precompile for Identity { - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, input.len(), 15, 3)?; - - Ok((ExitSucceed::Returned, input.to_vec(), cost)) - } -} - -/// The ecrecover precompile. -pub struct ECRecover; - -impl Precompile for ECRecover { - fn execute( - i: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, i.len(), 3000, 0)?; - - let mut input = [0u8; 128]; - input[..min(i.len(), 128)].copy_from_slice(&i[..min(i.len(), 128)]); - - let mut msg = [0u8; 32]; - let mut sig = [0u8; 65]; - - msg[0..32].copy_from_slice(&input[0..32]); - sig[0..32].copy_from_slice(&input[64..96]); - sig[32..64].copy_from_slice(&input[96..128]); - sig[64] = input[63]; - - let pubkey = sp_io::crypto::secp256k1_ecdsa_recover(&sig, &msg) - .map_err(|_| ExitError::Other("Public key recover failed"))?; - let mut address = sp_io::hashing::keccak_256(&pubkey); - address[0..12].copy_from_slice(&[0u8; 12]); - - Ok((ExitSucceed::Returned, address.to_vec(), cost)) - } -} - -/// The ripemd precompile. -pub struct Ripemd160; - -impl Precompile for Ripemd160 { - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, input.len(), 600, 120)?; - - let mut ret = [0u8; 32]; - ret[12..32].copy_from_slice(&ripemd160::Ripemd160::digest(input)); - Ok((ExitSucceed::Returned, ret.to_vec(), cost)) - } -} - -/// The sha256 precompile. -pub struct Sha256; - -impl Precompile for Sha256 { - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, input.len(), 60, 12)?; - - let ret = sp_io::hashing::sha2_256(input); - Ok((ExitSucceed::Returned, ret.to_vec(), cost)) - } -} diff --git a/frame/evm/src/tests.rs b/frame/evm/src/tests.rs deleted file mode 100644 index d05fdca1407e5..0000000000000 --- a/frame/evm/src/tests.rs +++ /dev/null @@ -1,189 +0,0 @@ -#![cfg(test)] - -use super::*; - -use std::{str::FromStr, collections::BTreeMap}; -use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, impl_outer_dispatch, -}; -use sp_core::{Blake2Hasher, H256}; -use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, -}; - -impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} -} - -impl_outer_dispatch! { - pub enum OuterCall for Test where origin: Origin { - self::EVM, - } -} - -#[derive(Clone, Eq, PartialEq)] -pub struct Test; -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} -impl frame_system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = OuterCall; - type Hashing = BlakeTwo256; - type AccountId = AccountId32; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type PalletInfo = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); -} - -parameter_types! { - pub const ExistentialDeposit: u64 = 1; -} -impl pallet_balances::Trait for Test { - type MaxLocks = (); - type Balance = u64; - type DustRemoval = (); - type Event = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); -} - -parameter_types! { - pub const MinimumPeriod: u64 = 1000; -} -impl pallet_timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; - type WeightInfo = (); -} - -/// Fixed gas price of `0`. -pub struct FixedGasPrice; -impl FeeCalculator for FixedGasPrice { - fn min_gas_price() -> U256 { - // Gas price is always one token per gas. 
- 0.into() - } -} - -impl Trait for Test { - type FeeCalculator = FixedGasPrice; - - type CallOrigin = EnsureAddressRoot; - type WithdrawOrigin = EnsureAddressNever; - - type AddressMapping = HashedAddressMapping; - type Currency = Balances; - - type Event = Event; - type Precompiles = (); - type ChainId = SystemChainId; -} - -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type EVM = Module; - -pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - - let mut accounts = BTreeMap::new(); - accounts.insert( - H160::from_str("1000000000000000000000000000000000000001").unwrap(), - GenesisAccount { - nonce: U256::from(1), - balance: U256::from(1000000), - storage: Default::default(), - code: vec![ - 0x00, // STOP - ], - } - ); - accounts.insert( - H160::from_str("1000000000000000000000000000000000000002").unwrap(), - GenesisAccount { - nonce: U256::from(1), - balance: U256::from(1000000), - storage: Default::default(), - code: vec![ - 0xff, // INVALID - ], - } - ); - - pallet_balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); - GenesisConfig { accounts }.assimilate_storage::(&mut t).unwrap(); - t.into() -} - -#[test] -fn fail_call_return_ok() { - new_test_ext().execute_with(|| { - assert_ok!(EVM::call( - Origin::root(), - H160::default(), - H160::from_str("1000000000000000000000000000000000000001").unwrap(), - Vec::new(), - U256::default(), - 1000000, - U256::default(), - None, - )); - - assert_ok!(EVM::call( - Origin::root(), - H160::default(), - H160::from_str("1000000000000000000000000000000000000002").unwrap(), - Vec::new(), - U256::default(), - 1000000, - U256::default(), - None, - )); - }); -} - -#[test] -fn mutate_account_works() { - new_test_ext().execute_with(|| { - EVM::mutate_account_basic( - &H160::from_str("1000000000000000000000000000000000000001").unwrap(), - Account { - nonce: U256::from(10), - balance: U256::from(1000), - }, - ); - - assert_eq!(EVM::account_basic( - &H160::from_str("1000000000000000000000000000000000000001").unwrap() - ), Account { - nonce: U256::from(10), - balance: U256::from(1000), - }); - }); -} diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/example-offchain-worker/Cargo.toml index 7db1d348ab2d8..5a2db258f8a19 100644 --- a/frame/example-offchain-worker/Cargo.toml +++ b/frame/example-offchain-worker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example-offchain-worker" -version = "2.0.0" +version = "2.0.1" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -13,15 +13,15 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore", optional = true } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { 
version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-keystore = { version = "0.9.0", path = "../../primitives/keystore", optional = true } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } lite-json = { version = "0.1", default-features = false } [features] diff --git a/frame/example-offchain-worker/README.md b/frame/example-offchain-worker/README.md index 4da1a4c15f814..5299027f39250 100644 --- a/frame/example-offchain-worker/README.md +++ b/frame/example-offchain-worker/README.md @@ -1,3 +1,4 @@ + # Offchain Worker Example Module The Offchain Worker Example: A simple pallet demonstrating @@ -6,9 +7,9 @@ concepts, APIs and structures common to most offchain workers. Run `cargo doc --package pallet-example-offchain-worker --open` to view this module's documentation. -- [`pallet_example_offchain_worker::Trait`](https://docs.rs/pallet-example-offchain-worker/latest/pallet_example_offchain_worker/trait.Trait.html) -- [`Call`](https://docs.rs/pallet-example-offchain-worker/latest/pallet_example_offchain_worker/enum.Call.html) -- [`Module`](https://docs.rs/pallet-example-offchain-worker/latest/pallet_example_offchain_worker/struct.Module.html) +- [`pallet_example_offchain_worker::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) +- [`Module`](./struct.Module.html) ## Overview @@ -23,4 +24,4 @@ Additional logic in OCW is put in place to prevent spamming the network with bot and unsigned transactions, and custom `UnsignedValidator` makes sure that there is only one unsigned transaction floating in the network. -License: Unlicense \ No newline at end of file +License: Unlicense diff --git a/frame/example-offchain-worker/src/lib.rs b/frame/example-offchain-worker/src/lib.rs index 5fd5eff19bd73..dbcf7b10f4abd 100644 --- a/frame/example-offchain-worker/src/lib.rs +++ b/frame/example-offchain-worker/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! //! # Offchain Worker Example Module //! //! The Offchain Worker Example: A simple pallet demonstrating @@ -23,7 +24,7 @@ //! Run `cargo doc --package pallet-example-offchain-worker --open` to view this module's //! documentation. //! -//! - [`pallet_example_offchain_worker::Trait`](./trait.Trait.html) +//! - [`pallet_example_offchain_worker::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -102,12 +103,12 @@ pub mod crypto { } /// This pallet's configuration trait -pub trait Trait: CreateSignedTransaction> { +pub trait Config: CreateSignedTransaction> { /// The identifier type for an offchain worker. type AuthorityId: AppCrypto; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The overarching dispatch call type. 
type Call: From>; @@ -148,7 +149,7 @@ impl SignedPayload for PricePayload as ExampleOffchainWorker { + trait Store for Module as ExampleOffchainWorker { /// A vector of recently submitted prices. /// /// This is used to calculate average price, should have bounded size. @@ -164,7 +165,7 @@ decl_storage! { decl_event!( /// Events generated by the module. - pub enum Event where AccountId = ::AccountId { + pub enum Event where AccountId = ::AccountId { /// Event generated when new price is accepted to contribute to the average. /// \[price, who\] NewPrice(u32, AccountId), @@ -173,7 +174,7 @@ decl_event!( decl_module! { /// A public part of the pallet. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { fn deposit_event() = default; /// Submit new price to the list. @@ -309,7 +310,7 @@ enum TransactionType { /// /// This greatly helps with error messages, as the ones inside the macro /// can sometimes be hard to debug. -impl Module { +impl Module { /// Chooses which transaction type to send. /// /// This function serves mostly to showcase `StorageValue` helper @@ -678,7 +679,7 @@ impl Module { } #[allow(deprecated)] // ValidateUnsigned -impl frame_support::unsigned::ValidateUnsigned for Module { +impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; /// Validate unsigned call to this module. diff --git a/frame/example-offchain-worker/src/tests.rs b/frame/example-offchain-worker/src/tests.rs index 204b366964f47..6f73ffcb9e151 100644 --- a/frame/example-offchain-worker/src/tests.rs +++ b/frame/example-offchain-worker/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,12 +16,10 @@ // limitations under the License. use crate::*; +use crate as example_offchain_worker; use std::sync::Arc; -use codec::{Encode, Decode}; -use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, - weights::Weight, -}; +use codec::Decode; +use frame_support::{assert_ok, parameter_types}; use sp_core::{ H256, offchain::{OffchainExt, TransactionPoolExt, testing}, @@ -33,7 +31,7 @@ use sp_keystore::{ testing::KeyStore, }; use sp_runtime::{ - Perbill, RuntimeAppPublic, + RuntimeAppPublic, testing::{Header, TestXt}, traits::{ BlakeTwo256, IdentityLookup, Extrinsic as ExtrinsicT, @@ -41,25 +39,33 @@ use sp_runtime::{ }, }; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +// For testing the module, we construct a mock runtime. +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Example: example_offchain_worker::{Module, Call, Storage, Event, ValidateUnsigned}, + } +); -// For testing the module, we construct most of a mock runtime. This means -// first constructing a configuration type (`Test`) which `impl`s each of the -// configuration traits of modules we want to use. -#[derive(Clone, Eq, PartialEq, Encode, Decode)] -pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; - type Call = (); + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -67,24 +73,18 @@ impl frame_system::Trait for Test { type AccountId = sp_core::sr25519::Public; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } -type Extrinsic = TestXt, ()>; +type Extrinsic = TestXt; type AccountId = <::Signer as IdentifyAccount>::AccountId; impl frame_system::offchain::SigningTypes for Test { @@ -93,21 +93,21 @@ impl frame_system::offchain::SigningTypes for Test { } impl frame_system::offchain::SendTransactionTypes for Test where - Call: From, + Call: From, { - type OverarchingCall = Call; + type OverarchingCall = Call; type Extrinsic = Extrinsic; } impl frame_system::offchain::CreateSignedTransaction for Test where - Call: From, + Call: From, { fn create_transaction>( - call: Call, + call: Call, _public: ::Signer, _account: AccountId, nonce: u64, - ) -> Option<(Call, ::SignaturePayload)> { + ) -> Option<(Call, ::SignaturePayload)> { Some((call, (nonce, ()))) } } @@ -118,17 +118,15 @@ parameter_types! 
{ pub const UnsignedPriority: u64 = 1 << 20; } -impl Trait for Test { - type Event = (); +impl Config for Test { + type Event = Event; type AuthorityId = crypto::TestAuthId; - type Call = Call; + type Call = Call; type GracePeriod = GracePeriod; type UnsignedInterval = UnsignedInterval; type UnsignedPriority = UnsignedPriority; } -type Example = Module; - #[test] fn it_aggregates_the_price() { sp_io::TestExternalities::default().execute_with(|| { @@ -233,7 +231,7 @@ fn should_submit_signed_transaction_on_chain() { assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature.unwrap().0, 0); - assert_eq!(tx.call, Call::submit_price(15523)); + assert_eq!(tx.call, Call::Example(crate::Call::submit_price(15523))); }); } @@ -277,12 +275,12 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - if let Call::submit_price_unsigned_with_signed_payload(body, signature) = tx.call { + if let Call::Example(crate::Call::submit_price_unsigned_with_signed_payload(body, signature)) = tx.call { assert_eq!(body, price_payload); let signature_valid = ::Public, - ::BlockNumber + ::BlockNumber > as SignedPayload>::verify::(&price_payload, signature); assert!(signature_valid); @@ -330,12 +328,12 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - if let Call::submit_price_unsigned_with_signed_payload(body, signature) = tx.call { + if let Call::Example(crate::Call::submit_price_unsigned_with_signed_payload(body, signature)) = tx.call { assert_eq!(body, price_payload); let signature_valid = ::Public, - ::BlockNumber + ::BlockNumber > as SignedPayload>::verify::(&price_payload, signature); assert!(signature_valid); @@ -365,7 +363,7 @@ fn should_submit_raw_unsigned_transaction_on_chain() { assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - assert_eq!(tx.call, Call::submit_price_unsigned(1, 15523)); + assert_eq!(tx.call, Call::Example(crate::Call::submit_price_unsigned(1, 15523))); }); } diff --git a/frame/example-parallel/Cargo.toml b/frame/example-parallel/Cargo.toml index 01a612fb82fbf..b2f28887cec0b 100644 --- a/frame/example-parallel/Cargo.toml +++ b/frame/example-parallel/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example-parallel" -version = "2.0.0" +version = "2.0.1" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -12,14 +12,17 @@ description = "FRAME example pallet using runtime worker threads" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-tasks = { version = "2.0.0", default-features = false, path = 
"../../primitives/tasks" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-tasks = { version = "3.0.0", default-features = false, path = "../../primitives/tasks" } + +[dev-dependencies] +serde = { version = "1.0.101" } [features] default = ["std"] diff --git a/frame/example-parallel/src/lib.rs b/frame/example-parallel/src/lib.rs index 4b7ce72b4d40e..c83a722be127f 100644 --- a/frame/example-parallel/src/lib.rs +++ b/frame/example-parallel/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,15 +34,15 @@ use sp_std::vec::Vec; #[cfg(test)] mod tests; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From + Into<::Event>; + type Event: From + Into<::Event>; /// The overarching dispatch call type. type Call: From>; } decl_storage! { - trait Store for Module as ExampleOffchainWorker { + trait Store for Module as ExampleOffchainWorker { /// A vector of current participants /// /// To enlist someone to participate, signed payload should be @@ -87,7 +87,7 @@ impl EnlistedParticipant { decl_module! { /// A public part of the pallet. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { fn deposit_event() = default; /// Get the new event running. diff --git a/frame/example-parallel/src/tests.rs b/frame/example-parallel/src/tests.rs index 1da8c60388266..9c921e0ddfa8a 100644 --- a/frame/example-parallel/src/tests.rs +++ b/frame/example-parallel/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,35 +15,39 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::*; +use crate::{self as pallet_example_parallel, *}; -use codec::{Encode, Decode}; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; +use frame_support::parameter_types; use sp_core::H256; use sp_runtime::{ - Perbill, - testing::{Header}, + Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; -impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Example: pallet_example_parallel::{Module, Call, Storage, Event}, + } +); -#[derive(Clone, Eq, PartialEq, Encode, Decode)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); type Origin = Origin; - type Call = (); - type PalletInfo = (); + type Call = Call; + type PalletInfo = PalletInfo; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -51,20 +55,17 @@ impl frame_system::Trait for Test { type AccountId = sp_core::sr25519::Public; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; + type BlockWeights = (); + type BlockLength = (); type Version = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { @@ -73,13 +74,11 @@ parameter_types! 
{ pub const UnsignedPriority: u64 = 1 << 20; } -impl Trait for Test { - type Event = (); - type Call = Call; +impl Config for Test { + type Event = Event; + type Call = Call; } -type Example = Module; - #[test] fn it_can_enlist() { use sp_core::Pair; diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index 41889ea4828d0..c6dfc018b3f52 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +pallet-balances = { version = "3.0.0", default-features = false, path = "../balances" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core", default-features = false } +sp-core = { version = "3.0.0", path = "../../primitives/core", default-features = false } [features] default = ["std"] diff --git a/frame/example/README.md b/frame/example/README.md index 05ef4cd4351cf..46a0d076a969a 100644 --- a/frame/example/README.md +++ b/frame/example/README.md @@ -1,3 +1,4 @@ + # Example Pallet @@ -194,7 +195,7 @@ Copy and paste this template from frame/example/src/lib.rs into file \```rust use ; -pub trait Trait: ::Trait { } +pub trait Config: ::Config { } \``` \### Simple Code Snippet @@ -234,4 +235,4 @@ pub trait Trait: ::Trait { } // that the implementation is based on.
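The example-pallet hunks above lost their angle-bracketed generics during extraction (e.g. `pub trait Config: ::Config { }`). As a clarifying aside rather than part of the patch, the renamed configuration trait conventionally takes the following shape after the `Trait` -> `Config` migration; the exact bounds are the standard FRAME 3.0 form and should be read as illustrative.

```rust
// Illustrative sketch only: the example pallet's configuration trait after the
// `Trait` -> `Config` rename, with the stripped generic bounds written out.
pub trait Config: pallet_balances::Config {
    /// The overarching event type.
    type Event: From<Event<Self>> + Into<<Self as frame_system::Config>::Event>;
}

frame_support::decl_storage! {
    // Storage is still declared against `Module<T: Config>`; only the trait name changes.
    trait Store for Module<T: Config> as Example {
        Dummy get(fn dummy) config(): Option<T::Balance>;
    }
}
```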

-License: Unlicense \ No newline at end of file +License: Unlicense diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 4b10804fb10f7..335c277b7c2a3 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! //! # Example Pallet //! //! @@ -62,7 +63,7 @@ //! // Include the following links that shows what trait needs to be implemented to use the pallet //! // and the supported dispatchables that are documented in the Call enum. //! -//! - \[`::Trait`](./trait.Trait.html) +//! - \[`::Config`](./trait.Config.html) //! - \[`Call`](./enum.Call.html) //! - \[`Module`](./struct.Module.html) //! @@ -130,7 +131,7 @@ //! //! //! // Reference documentation of aspects such as `storageItems` and `dispatchable` functions should only be -//! // included in the https://docs.rs Rustdocs for Substrate and not repeated in the README file. +//! // included in the Rustdocs for Substrate and not repeated in the README file. //! //! \### Dispatchable Functions //! @@ -211,7 +212,7 @@ //! \```rust //! use ; //! -//! pub trait Trait: ::Trait { } +//! pub trait Config: ::Config { } //! \``` //! //! \### Simple Code Snippet @@ -223,8 +224,8 @@ //! // Show a usage example in an actual runtime //! //! // See: -//! // - Substrate TCR https://github.com/parity-samples/substrate-tcr -//! // - Substrate Kitties https://shawntabrizi.github.io/substrate-collectables-workshop/#/ +//! // - Substrate TCR +//! // - Substrate Kitties //! //! \## Genesis Config //! @@ -285,9 +286,9 @@ use sp_runtime::{ // - The final weight of each dispatch is calculated as the argument of the call multiplied by the // parameter given to the `WeightForSetDummy`'s constructor. // - assigns a dispatch class `operational` if the argument of the call is more than 1000. -struct WeightForSetDummy(BalanceOf); +struct WeightForSetDummy(BalanceOf); -impl WeighData<(&BalanceOf,)> for WeightForSetDummy +impl WeighData<(&BalanceOf,)> for WeightForSetDummy { fn weigh_data(&self, target: (&BalanceOf,)) -> Weight { let multiplier = self.0; @@ -295,7 +296,7 @@ impl WeighData<(&BalanceOf,)> for WeightForSetDumm } } -impl ClassifyDispatch<(&BalanceOf,)> for WeightForSetDummy { +impl ClassifyDispatch<(&BalanceOf,)> for WeightForSetDummy { fn classify_dispatch(&self, target: (&BalanceOf,)) -> DispatchClass { if *target.0 > >::from(1000u32) { DispatchClass::Operational @@ -305,23 +306,23 @@ impl ClassifyDispatch<(&BalanceOf,)> for WeightFor } } -impl PaysFee<(&BalanceOf,)> for WeightForSetDummy { +impl PaysFee<(&BalanceOf,)> for WeightForSetDummy { fn pays_fee(&self, _target: (&BalanceOf,)) -> Pays { Pays::Yes } } /// A type alias for the balance type from this pallet's point of view. -type BalanceOf = ::Balance; +type BalanceOf = ::Balance; /// Our pallet's configuration trait. All our types and constants go in here. If the /// pallet is dependent on specific other pallets, then their configuration traits /// should be added to our implied traits list. /// -/// `frame_system::Trait` should always be included in our implied traits. 
-pub trait Trait: pallet_balances::Trait { +/// `frame_system::Config` should always be included in our implied traits. +pub trait Config: pallet_balances::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; } decl_storage! { @@ -332,7 +333,7 @@ decl_storage! { // It is important to update your storage name so that your pallet's // storage items are isolated from other pallets. // ---------------------------------vvvvvvv - trait Store for Module as Example { + trait Store for Module as Example { // Any storage declarations of the form: // `pub? Name get(fn getter_name)? [config()|config(myname)] [build(|_| {...})] : (= )?;` // where `` is either: @@ -370,7 +371,7 @@ decl_event!( /// Events are a simple means of reporting specific conditions and /// circumstances that have happened that users, Dapps and/or chain explorers would find /// interesting and otherwise difficult to detect. - pub enum Event where B = ::Balance { + pub enum Event where B = ::Balance { // Just a normal `enum`, here's a dummy event to ensure it compiles. /// Dummy event, just here so there's a generic type that's used. Dummy(B), @@ -413,7 +414,7 @@ decl_event!( // `ensure_root` and `ensure_none`. decl_module! { // Simple declaration of the `Module` type. Lets the macro know what its working on. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// Deposit one of this pallet's events by using the default implementation. /// It is also possible to provide a custom implementation. /// For non-generic events, the generic parameter just needs to be dropped, so that it @@ -547,7 +548,7 @@ decl_module! { // - Public interface. These are functions that are `pub` and generally fall into inspector // functions that do not write to storage and operation functions that do. // - Private functions. These are your usual private utilities unavailable to other pallets. -impl Module { +impl Module { // Add public immutables and private mutables. #[allow(dead_code)] fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> DispatchResult { @@ -570,13 +571,14 @@ impl Module { // decodable type that implements `SignedExtension`. See the trait definition for the full list of // bounds. As a convention, you can follow this approach to create an extension for your pallet: // - If the extension does not carry any data, then use a tuple struct with just a `marker` -// (needed for the compiler to accept `T: Trait`) will suffice. +// (needed for the compiler to accept `T: Config`) will suffice. // - Otherwise, create a tuple struct which contains the external data. Of course, for the entire // struct to be decodable, each individual item also needs to be decodable. // // Note that a signed extension can also indicate that a particular data must be present in the // _signing payload_ of a transaction by providing an implementation for the `additional_signed` -// method. This example will not cover this type of extension. See `CheckRuntime` in FRAME System +// method. This example will not cover this type of extension. See `CheckSpecVersion` in +// [FRAME System](https://github.com/paritytech/substrate/tree/master/frame/system#signed-extensions) // for an example. // // Using the extension, you can add some hooks to the life cycle of each transaction. Note that by @@ -601,21 +603,21 @@ impl Module { /// Additionally, it drops any transaction with an encoded length higher than 200 bytes. 
No /// particular reason why, just to demonstrate the power of signed extensions. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct WatchDummy(PhantomData); +pub struct WatchDummy(PhantomData); -impl sp_std::fmt::Debug for WatchDummy { +impl sp_std::fmt::Debug for WatchDummy { fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "WatchDummy") } } -impl SignedExtension for WatchDummy +impl SignedExtension for WatchDummy where - ::Call: IsSubType>, + ::Call: IsSubType>, { const IDENTIFIER: &'static str = "WatchDummy"; type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = (); type Pre = (); @@ -654,20 +656,15 @@ mod benchmarking { use frame_system::RawOrigin; benchmarks!{ - _ { - // Define a common range for `b`. - let b in 1 .. 1000 => (); - } - // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. accumulate_dummy { - let b in ...; + let b in 1 .. 1000; let caller = account("caller", 0, 0); }: _ (RawOrigin::Signed(caller), b.into()) // This will measure the execution time of `set_dummy` for b in [1..1000] range. set_dummy { - let b in ...; + let b in 1 .. 1000; }: set_dummy (RawOrigin::Root, b.into()) // This will measure the execution time of `set_dummy` for b in [1..10] range. @@ -710,96 +707,94 @@ mod tests { use super::*; use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, impl_outer_dispatch, + assert_ok, parameter_types, weights::{DispatchInfo, GetDispatchInfo}, traits::{OnInitialize, OnFinalize} }; use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use sp_runtime::{ - Perbill, - testing::Header, + testing::Header, BuildStorage, traits::{BlakeTwo256, IdentityLookup}, }; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - impl_outer_dispatch! { - pub enum OuterCall for Test where origin: Origin { - self::Example, + // Reexport crate as its pallet name for construct_runtime. + use crate as pallet_example; + + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + // For testing the pallet, we construct a mock runtime. + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Example: pallet_example::{Module, Call, Storage, Config, Event}, } - } + ); - // For testing the pallet, we construct most of a mock runtime. This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of pallets we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = OuterCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type DustRemoval = (); - type Event = (); + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } - impl Trait for Test { - type Event = (); + impl Config for Test { + type Event = Event; } - type System = frame_system::Module; - type Example = Module; // This function basically just builds a genesis storage key/value store according to // our desired mockup. pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - // We use default for brevity, but you can configure as desired if needed. - pallet_balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); - GenesisConfig::{ - dummy: 42, - // we configure the map with (key, value) pairs. - bar: vec![(1, 2), (2, 3)], - foo: 24, - }.assimilate_storage(&mut t).unwrap(); + let t = GenesisConfig { + // We use default for brevity, but you can configure as desired if needed. + frame_system: Some(Default::default()), + pallet_balances: Some(Default::default()), + pallet_example: Some(pallet_example::GenesisConfig { + dummy: 42, + // we configure the map with (key, value) pairs. + bar: vec![(1, 2), (2, 3)], + foo: 24, + }), + }.build_storage().unwrap(); t.into() } @@ -836,7 +831,7 @@ mod tests { #[test] fn signed_ext_watch_dummy_works() { new_test_ext().execute_with(|| { - let call = >::set_dummy(10).into(); + let call = >::set_dummy(10).into(); let info = DispatchInfo::default(); assert_eq!( @@ -855,13 +850,13 @@ mod tests { #[test] fn weights_work() { // must have a defined weight. - let default_call = >::accumulate_dummy(10); + let default_call = >::accumulate_dummy(10); let info = default_call.get_dispatch_info(); // aka. 
`let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` assert_eq!(info.weight, 0); // must have a custom weight of `100 * arg = 2000` - let custom_call = >::set_dummy(20); + let custom_call = >::set_dummy(20); let info = custom_call.get_dispatch_info(); assert_eq!(info.weight, 2000); } diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index 3bd8da04e6cf1..31f1f34174ed1 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-executive" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,24 +13,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0", default-features = false, path = "../../primitives/tracing" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-tracing = { version = "3.0.0", default-features = false, path = "../../primitives/tracing" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } [dev-dependencies] hex-literal = "0.3.1" -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -pallet-indices = { version = "2.0.0", path = "../indices" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-transaction-payment = { version = "2.0.0", path = "../transaction-payment" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-io ={ version = "3.0.0", path = "../../primitives/io" } +pallet-indices = { version = "3.0.0", path = "../indices" } +pallet-balances = { version = "3.0.0", path = "../balances" } +pallet-transaction-payment = { version = "3.0.0", path = "../transaction-payment" } +sp-version = { version = "3.0.0", path = "../../primitives/version" } [features] default = ["std"] diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 96e7a6c04094b..df1ae17df613f 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -117,7 +117,7 @@ use sp_std::{prelude::*, marker::PhantomData}; use frame_support::{ - storage::StorageValue, weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, + weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, traits::{OnInitialize, OnFinalize, OnRuntimeUpgrade, OffchainWorker}, dispatch::PostDispatchInfo, }; @@ -130,7 +130,7 @@ use sp_runtime::{ transaction_validity::{TransactionValidity, TransactionSource}, }; use codec::{Codec, Encode}; -use frame_system::{extrinsics_root, DigestOf}; +use frame_system::DigestOf; /// Trait that can be used to execute a block. pub trait ExecuteBlock { @@ -145,7 +145,7 @@ pub type OriginOf = as Dispatchable>::Origin; /// Main entry point for certain runtime actions as e.g. `execute_block`. /// /// Generic parameters: -/// - `System`: Something that implements `frame_system::Trait` +/// - `System`: Something that implements `frame_system::Config` /// - `Block`: The block type of the runtime /// - `Context`: The context that is used when checking an extrinsic. /// - `UnsignedValidator`: The unsigned transaction validator of the runtime. @@ -158,7 +158,7 @@ pub struct Executive, Context: Default, UnsignedValidator, @@ -185,7 +185,7 @@ where } impl< - System: frame_system::Trait, + System: frame_system::Config, Block: traits::Block, Context: Default, UnsignedValidator, @@ -213,7 +213,6 @@ where Self::initialize_block_impl( header.number(), header.parent_hash(), - header.extrinsics_root(), &digests ); } @@ -231,26 +230,30 @@ where fn initialize_block_impl( block_number: &System::BlockNumber, parent_hash: &System::Hash, - extrinsics_root: &System::Hash, digest: &Digest, ) { + let mut weight = 0; if Self::runtime_upgraded() { // System is not part of `AllModules`, so we need to call this manually. - let mut weight = as OnRuntimeUpgrade>::on_runtime_upgrade(); + weight = weight.saturating_add( as OnRuntimeUpgrade>::on_runtime_upgrade()); weight = weight.saturating_add(COnRuntimeUpgrade::on_runtime_upgrade()); weight = weight.saturating_add(::on_runtime_upgrade()); - >::register_extra_weight_unchecked(weight, DispatchClass::Mandatory); } >::initialize( block_number, parent_hash, - extrinsics_root, digest, frame_system::InitKind::Full, ); - as OnInitialize>::on_initialize(*block_number); - let weight = >::on_initialize(*block_number) - .saturating_add(>::get()); + weight = weight.saturating_add( + as OnInitialize>::on_initialize(*block_number) + ); + weight = weight.saturating_add( + >::on_initialize(*block_number) + ); + weight = weight.saturating_add( + >::get().base_block + ); >::register_extra_weight_unchecked(weight, DispatchClass::Mandatory); frame_system::Module::::note_finished_initialize(); @@ -258,11 +261,11 @@ where /// Returns if the runtime was upgraded since the last time this function was called. fn runtime_upgraded() -> bool { - let last = frame_system::LastRuntimeUpgrade::get(); + let last = frame_system::LastRuntimeUpgrade::::get(); let current = >::get(); if last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) { - frame_system::LastRuntimeUpgrade::put( + frame_system::LastRuntimeUpgrade::::put( frame_system::LastRuntimeUpgradeInfo::from(current), ); true @@ -280,13 +283,8 @@ where assert!( n > System::BlockNumber::zero() && >::block_hash(n - System::BlockNumber::one()) == *header.parent_hash(), - "Parent hash should be valid." 
+ "Parent hash should be valid.", ); - - // Check that transaction trie root represents the transactions. - let xts_root = extrinsics_root::(&block.extrinsics()); - header.extrinsics_root().check_equal(&xts_root); - assert!(header.extrinsics_root() == &xts_root, "Transaction trie root must be valid."); } /// Actually execute all transitions for `block`. @@ -316,8 +314,14 @@ where } /// Execute given extrinsics and take care of post-extrinsics book-keeping. - fn execute_extrinsics_with_book_keeping(extrinsics: Vec, block_number: NumberFor) { - extrinsics.into_iter().for_each(Self::apply_extrinsic_no_note); + fn execute_extrinsics_with_book_keeping( + extrinsics: Vec, + block_number: NumberFor, + ) { + extrinsics.into_iter().for_each(|e| if let Err(e) = Self::apply_extrinsic(e) { + let err: &'static str = e.into(); + panic!(err) + }); // post-extrinsics book-keeping >::note_finished_extrinsics(); @@ -335,8 +339,6 @@ where as OnFinalize>::on_finalize(block_number); >::on_finalize(block_number); - // set up extrinsics - >::derive_extrinsics(); >::finalize() } @@ -348,23 +350,14 @@ where sp_io::init_tracing(); let encoded = uxt.encode(); let encoded_len = encoded.len(); - Self::apply_extrinsic_with_len(uxt, encoded_len, Some(encoded)) - } - - /// Apply an extrinsic inside the block execution function. - fn apply_extrinsic_no_note(uxt: Block::Extrinsic) { - let l = uxt.encode().len(); - match Self::apply_extrinsic_with_len(uxt, l, None) { - Ok(_) => (), - Err(e) => { let err: &'static str = e.into(); panic!(err) }, - } + Self::apply_extrinsic_with_len(uxt, encoded_len, encoded) } /// Actually apply an extrinsic given its `encoded_len`; this doesn't note its hash. fn apply_extrinsic_with_len( uxt: Block::Extrinsic, encoded_len: usize, - to_note: Option>, + to_note: Vec, ) -> ApplyExtrinsicResult { sp_tracing::enter_span!( sp_tracing::info_span!("apply_extrinsic", @@ -376,9 +369,7 @@ where // We don't need to make sure to `note_extrinsic` only after we know it's going to be // executed to prevent it from leaking in storage since at this point, it will either // execute or panic (and revert storage changes). - if let Some(encoded) = to_note { - >::note_extrinsic(encoded); - } + >::note_extrinsic(to_note); // AUDIT: Under no circumstances may this function panic from here onwards. @@ -412,6 +403,11 @@ where let storage_root = new_header.state_root(); header.state_root().check_equal(&storage_root); assert!(header.state_root() == storage_root, "Storage root must match that calculated."); + + assert!( + header.extrinsics_root() == new_header.extrinsics_root(), + "Transaction trie root must be valid.", + ); } /// Check a given signed transaction for validity. This doesn't execute any @@ -451,25 +447,25 @@ where // We need to keep events available for offchain workers, // hence we initialize the block manually. // OffchainWorker RuntimeApi should skip initialization. - let digests = Self::extract_pre_digest(header); + let digests = header.digest().clone(); >::initialize( header.number(), header.parent_hash(), - header.extrinsics_root(), &digests, frame_system::InitKind::Inspection, ); + // Frame system only inserts the parent hash into the block hashes as normally we don't know + // the hash for the header before. However, here we are aware of the hash and we can add it + // as well. + frame_system::BlockHash::::insert(header.number(), header.hash()); + // Initialize logger, so the log messages are visible // also when running WASM. 
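For readers following the `initialize_block_impl` changes above: all block-initialisation weight is now folded into one value with `saturating_add` (the runtime-upgrade hooks, the `on_initialize` hooks and the new `base_block` weight) and registered once under `DispatchClass::Mandatory`. A standalone sketch of that bookkeeping, with plain `u64` standing in for `Weight` and invented inputs:

```rust
// Standalone illustration of the accumulation performed in `initialize_block_impl`;
// plain `u64` stands in for `Weight` and the inputs below are invented.
fn initial_block_weight(
    runtime_upgrade_weights: &[u64], // frame_system + custom + AllModules upgrade hooks
    on_initialize_weights: &[u64],   // frame_system + AllModules `on_initialize`
    base_block: u64,                 // `BlockWeights::get().base_block`
) -> u64 {
    runtime_upgrade_weights
        .iter()
        .chain(on_initialize_weights)
        .fold(base_block, |total, w| total.saturating_add(*w))
}

fn main() {
    // Every contribution is counted exactly once towards the mandatory class.
    assert_eq!(initial_block_weight(&[200, 100], &[175], 10), 485);
}
```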
frame_support::debug::RuntimeLogger::init(); - >::offchain_worker( - // to maintain backward compatibility we call module offchain workers - // with parent block number. - header.number().saturating_sub(1u32.into()) - ) + >::offchain_worker(*header.number()) } } @@ -479,7 +475,7 @@ mod tests { use super::*; use sp_core::H256; use sp_runtime::{ - generic::Era, Perbill, DispatchError, testing::{Digest, Header, Block}, + generic::{Era, DigestItem}, DispatchError, testing::{Digest, Header, Block}, traits::{Header as HeaderT, BlakeTwo256, IdentityLookup}, transaction_validity::{ InvalidTransaction, ValidTransaction, TransactionValidityError, UnknownTransaction @@ -490,7 +486,9 @@ mod tests { weights::{Weight, RuntimeDbWeight, IdentityFee, WeightToFeePolynomial}, traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons}, }; - use frame_system::{Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; + use frame_system::{ + Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo, + }; use pallet_transaction_payment::CurrencyAdapter; use pallet_balances::Call as BalancesCall; use hex_literal::hex; @@ -502,10 +500,10 @@ mod tests { UnknownTransaction, TransactionSource, TransactionValidity }; - pub trait Trait: frame_system::Trait {} + pub trait Config: frame_system::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 100] fn some_function(origin) { // NOTE: does not make any different. @@ -543,12 +541,22 @@ mod tests { fn on_runtime_upgrade() -> Weight { sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); - 0 + 200 + } + + fn offchain_worker(n: T::BlockNumber) { + assert_eq!(T::BlockNumber::from(1u32), n); + } + + #[weight = 0] + fn calculate_storage_root(origin) { + let root = sp_io::storage::root(); + sp_io::storage::set("storage_root".as_bytes(), &root); } } } - impl sp_runtime::traits::ValidateUnsigned for Module { + impl sp_runtime::traits::ValidateUnsigned for Module { type Call = Call; fn validate_unsigned( @@ -577,18 +585,22 @@ mod tests { parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const BlockExecutionWeight: Weight = 10; - pub const ExtrinsicBaseWeight: Weight = 5; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::builder() + .base_block(10) + .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = 5) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = 1024.into()) + .build_or_panic(); pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, write: 100, }; } - impl frame_system::Trait for Runtime { + impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type Call = Call; @@ -600,26 +612,20 @@ mod tests { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = DbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = RuntimeVersion; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } type Balance = u64; parameter_types! { pub const ExistentialDeposit: Balance = 1; } - impl pallet_balances::Trait for Runtime { + impl pallet_balances::Config for Runtime { type Balance = Balance; type Event = Event; type DustRemoval = (); @@ -632,13 +638,13 @@ mod tests { parameter_types! 
{ pub const TransactionByteFee: Balance = 0; } - impl pallet_transaction_payment::Trait for Runtime { + impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } - impl custom::Trait for Runtime {} + impl custom::Config for Runtime {} pub struct RuntimeVersion; impl frame_support::traits::Get for RuntimeVersion { @@ -661,8 +667,8 @@ mod tests { type TestXt = sp_runtime::testing::TestXt; type TestBlock = Block; type TestUncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< - ::AccountId, - ::Call, + ::AccountId, + ::Call, (), SignedExtra, >; @@ -675,7 +681,7 @@ mod tests { fn on_runtime_upgrade() -> Weight { sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); - 0 + 100 } } @@ -708,9 +714,10 @@ mod tests { balances: vec![(1, 211)], }.assimilate_storage(&mut t).unwrap(); let xt = TestXt::new(Call::Balances(BalancesCall::transfer(2, 69)), sign_extra(1, 0, 0)); - let weight = xt.get_dispatch_info().weight + ::ExtrinsicBaseWeight::get(); + let weight = xt.get_dispatch_info().weight + + ::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic; let fee: Balance - = ::WeightToFee::calc(&weight); + = ::WeightToFee::calc(&weight); let mut t = sp_io::TestExternalities::new(t); t.execute_with(|| { Executive::initialize_block(&Header::new( @@ -742,7 +749,7 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("465a1569d309039bdf84b0479d28064ea29e6584584dc7d788904bb14489c6f6").into(), + state_root: hex!("1599922f15b2d5cf75e83370e29e13b96fdf799d917a5b6319736af292f21665").into(), extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), digest: Digest { logs: vec![], }, }, @@ -810,9 +817,11 @@ mod tests { let xt = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 0, 0)); let encoded = xt.encode(); let encoded_len = encoded.len() as Weight; - // Block execution weight + on_initialize weight - let base_block_weight = 175 + ::BlockExecutionWeight::get(); - let limit = AvailableBlockRatio::get() * MaximumBlockWeight::get() - base_block_weight; + // on_initialize weight + base block execution weight + let block_weights = ::BlockWeights::get(); + let base_block_weight = 175 + block_weights.base_block; + let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() + - base_block_weight; let num_to_exhaust_block = limit / (encoded_len + 5); t.execute_with(|| { Executive::initialize_block(&Header::new( @@ -854,7 +863,7 @@ mod tests { let mut t = new_test_ext(1); t.execute_with(|| { // Block execution weight + on_initialize weight from custom module - let base_block_weight = 175 + ::BlockExecutionWeight::get(); + let base_block_weight = 175 + ::BlockWeights::get().base_block; Executive::initialize_block(&Header::new( 1, @@ -872,7 +881,8 @@ mod tests { assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); // default weight for `TestXt` == encoded length. 
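The executive test runtime above replaces the old `MaximumBlockWeight` / `ExtrinsicBaseWeight` / `AvailableBlockRatio` constants with a single `frame_system::limits::BlockWeights` value assembled through its builder. A sketch of that builder usage and of reading per-class values back, mirroring what the hunks already show, with only light wrapping and comments added:

```rust
use frame_support::weights::DispatchClass;
use frame_system::limits::BlockWeights;

// Mirrors the `BlockWeights` parameter type defined in the executive tests above.
fn test_block_weights() -> BlockWeights {
    BlockWeights::builder()
        // Weight charged for the block itself (replaces `BlockExecutionWeight`).
        .base_block(10)
        // Per-extrinsic overhead for every dispatch class (replaces `ExtrinsicBaseWeight`).
        .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = 5)
        // Total-weight cap for the non-mandatory classes (replaces `MaximumBlockWeight`).
        .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = 1024.into())
        .build_or_panic()
}

// Per-class parameters are read back with `get(..)`, as the updated assertions do.
fn normal_base_extrinsic() -> frame_support::weights::Weight {
    test_block_weights().get(DispatchClass::Normal).base_extrinsic
}
```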
- let extrinsic_weight = len as Weight + ::ExtrinsicBaseWeight::get(); + let extrinsic_weight = len as Weight + ::BlockWeights + ::get().get(DispatchClass::Normal).base_extrinsic; assert_eq!( >::block_weight().total(), base_block_weight + 3 * extrinsic_weight, @@ -938,10 +948,13 @@ mod tests { Call::System(SystemCall::remark(vec![1u8])), sign_extra(1, 0, 0), ); - let weight = xt.get_dispatch_info().weight - + ::ExtrinsicBaseWeight::get(); + let weight = xt.get_dispatch_info().weight + + ::BlockWeights + ::get() + .get(DispatchClass::Normal) + .base_extrinsic; let fee: Balance = - ::WeightToFee::calc(&weight); + ::WeightToFee::calc(&weight); Executive::initialize_block(&Header::new( 1, H256::default(), @@ -985,7 +998,7 @@ mod tests { new_test_ext(1).execute_with(|| { RUNTIME_VERSION.with(|v| *v.borrow_mut() = Default::default()); // It should be added at genesis - assert!(frame_system::LastRuntimeUpgrade::exists()); + assert!(frame_system::LastRuntimeUpgrade::::exists()); assert!(!Executive::runtime_upgraded()); RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { @@ -995,7 +1008,7 @@ mod tests { assert!(Executive::runtime_upgraded()); assert_eq!( Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "".into() }), - frame_system::LastRuntimeUpgrade::get(), + frame_system::LastRuntimeUpgrade::::get(), ); RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { @@ -1006,7 +1019,7 @@ mod tests { assert!(Executive::runtime_upgraded()); assert_eq!( Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::get(), + frame_system::LastRuntimeUpgrade::::get(), ); RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { @@ -1017,11 +1030,11 @@ mod tests { }); assert!(!Executive::runtime_upgraded()); - frame_system::LastRuntimeUpgrade::take(); + frame_system::LastRuntimeUpgrade::::take(); assert!(Executive::runtime_upgraded()); assert_eq!( Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::get(), + frame_system::LastRuntimeUpgrade::::get(), ); }) } @@ -1073,4 +1086,92 @@ mod tests { assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); }); } + + #[test] + fn all_weights_are_recorded_correctly() { + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called for maximum complexity + RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + ..Default::default() + }); + + let block_number = 1; + + Executive::initialize_block(&Header::new( + block_number, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + // All weights that show up in the `initialize_block_impl` + let frame_system_upgrade_weight = frame_system::Module::::on_runtime_upgrade(); + let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); + let runtime_upgrade_weight = ::on_runtime_upgrade(); + let frame_system_on_initialize_weight = frame_system::Module::::on_initialize(block_number); + let on_initialize_weight = >::on_initialize(block_number); + let base_block_weight = ::BlockWeights::get().base_block; + + // Weights are recorded correctly + assert_eq!( + frame_system::Module::::block_weight().total(), + frame_system_upgrade_weight + + custom_runtime_upgrade_weight + + runtime_upgrade_weight + + frame_system_on_initialize_weight + + on_initialize_weight + + base_block_weight, + ); + }); + } + + #[test] + fn 
offchain_worker_works_as_expected() { + new_test_ext(1).execute_with(|| { + let parent_hash = sp_core::H256::from([69u8; 32]); + let mut digest = Digest::default(); + digest.push(DigestItem::Seal([1, 2, 3, 4], vec![5, 6, 7, 8])); + + let header = Header::new( + 1, + H256::default(), + H256::default(), + parent_hash, + digest.clone(), + ); + + Executive::offchain_worker(&header); + + assert_eq!(digest, System::digest()); + assert_eq!(parent_hash, System::block_hash(0)); + assert_eq!(header.hash(), System::block_hash(1)); + }); + } + + #[test] + fn calculating_storage_root_twice_works() { + let call = Call::Custom(custom::Call::calculate_storage_root()); + let xt = TestXt::new(call, sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt])); + }); + } } diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 6cde1061df87c..a9ba0ccc56f3b 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-grandpa" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,30 +14,30 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-finality-grandpa = { version = "2.0.0", default-features = false, path = "../../primitives/finality-grandpa" } -sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-finality-grandpa = { version = "3.0.0", default-features = false, path = "../../primitives/finality-grandpa" } +sp-session = { version = "3.0.0", default-features = false, path = "../../primitives/session" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = 
"3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +pallet-authorship = { version = "3.0.0", default-features = false, path = "../authorship" } +pallet-session = { version = "3.0.0", default-features = false, path = "../session" } [dev-dependencies] -frame-benchmarking = { version = "2.0.0", path = "../benchmarking" } -grandpa = { package = "finality-grandpa", version = "0.12.3", features = ["derive-codec"] } -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-offences = { version = "2.0.0", path = "../offences" } -pallet-staking = { version = "2.0.0", path = "../staking" } -pallet-staking-reward-curve = { version = "2.0.0", path = "../staking/reward-curve" } -pallet-timestamp = { version = "2.0.0", path = "../timestamp" } +frame-benchmarking = { version = "3.0.0", path = "../benchmarking" } +grandpa = { package = "finality-grandpa", version = "0.13.0", features = ["derive-codec"] } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } +pallet-balances = { version = "3.0.0", path = "../balances" } +pallet-offences = { version = "3.0.0", path = "../offences" } +pallet-staking = { version = "3.0.0", path = "../staking" } +pallet-staking-reward-curve = { version = "3.0.0", path = "../staking/reward-curve" } +pallet-timestamp = { version = "3.0.0", path = "../timestamp" } [features] default = ["std"] diff --git a/frame/grandpa/src/benchmarking.rs b/frame/grandpa/src/benchmarking.rs index bac2c24584464..5f08a5ea4bac0 100644 --- a/frame/grandpa/src/benchmarking.rs +++ b/frame/grandpa/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,8 +25,6 @@ use frame_system::RawOrigin; use sp_core::H256; benchmarks! { - _ { } - check_equivocation_proof { let x in 0 .. 1; diff --git a/frame/grandpa/src/default_weights.rs b/frame/grandpa/src/default_weights.rs index 4893fc2cf1860..63122fcf4b538 100644 --- a/frame/grandpa/src/default_weights.rs +++ b/frame/grandpa/src/default_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index e9662a726c40e..b8bff59d3920c 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -40,7 +40,10 @@ use sp_std::prelude::*; use codec::{self as codec, Decode, Encode}; -use frame_support::{debug, traits::KeyOwnerProofSystem}; +use frame_support::{ + debug, + traits::{Get, KeyOwnerProofSystem}, +}; use sp_finality_grandpa::{EquivocationProof, RoundNumber, SetId}; use sp_runtime::{ transaction_validity::{ @@ -54,16 +57,20 @@ use sp_staking::{ SessionIndex, }; -use super::{Call, Module, Trait}; +use super::{Call, Module, Config}; /// A trait with utility methods for handling equivocation reports in GRANDPA. /// The offence type is generic, and the trait provides , reporting an offence /// triggered by a valid equivocation report, and also for creating and /// submitting equivocation report extrinsics (useful only in offchain context). -pub trait HandleEquivocation { +pub trait HandleEquivocation { /// The offence type used for reporting offences on valid equivocation reports. type Offence: GrandpaOffence; + /// The longevity, in blocks, that the equivocation report is valid for. When using the staking + /// pallet this should be equal to the bonding duration (in blocks, not eras). + type ReportLongevity: Get; + /// Report an offence proved by the given reporters. fn report_offence( reporters: Vec, @@ -86,8 +93,9 @@ pub trait HandleEquivocation { fn block_author() -> Option; } -impl HandleEquivocation for () { +impl HandleEquivocation for () { type Offence = GrandpaEquivocationOffence; + type ReportLongevity = (); fn report_offence( _reporters: Vec, @@ -119,11 +127,11 @@ impl HandleEquivocation for () { /// using existing subsystems that are part of frame (type bounds described /// below) and will dispatch to them directly, it's only purpose is to wire all /// subsystems together. -pub struct EquivocationHandler> { - _phantom: sp_std::marker::PhantomData<(I, R, O)>, +pub struct EquivocationHandler> { + _phantom: sp_std::marker::PhantomData<(I, R, L, O)>, } -impl Default for EquivocationHandler { +impl Default for EquivocationHandler { fn default() -> Self { Self { _phantom: Default::default(), @@ -131,19 +139,23 @@ impl Default for EquivocationHandler { } } -impl HandleEquivocation for EquivocationHandler +impl HandleEquivocation for EquivocationHandler where // We use the authorship pallet to fetch the current block author and use // `offchain::SendTransactionTypes` for unsigned extrinsic creation and // submission. - T: Trait + pallet_authorship::Trait + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, // A system for reporting offences after valid equivocation reports are // processed. R: ReportOffence, + // The longevity (in blocks) that the equivocation report is valid for. When using the staking + // pallet this should be the bonding duration. + L: Get, // The offence type that should be used when reporting. O: GrandpaOffence, { type Offence = O; + type ReportLongevity = L; fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { R::report_offence(reporters, offence) @@ -187,10 +199,10 @@ pub struct GrandpaTimeSlot { /// A `ValidateUnsigned` implementation that restricts calls to `report_equivocation_unsigned` /// to local calls (i.e. extrinsics generated on this node) or that already in a block. This /// guarantees that only block authors can include unsigned equivocation reports. 
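The `ReportLongevity` associated type introduced above (its `Get<..>` bound lost its argument to extraction; upstream it is a block count) feeds the `.longevity(..)` stamp applied in `validate_unsigned` below. For context, this is how the constant and the handler are typically wired up, mirroring the mock runtime later in this diff, with the stripped generic arguments restored; the exact parameter list of `EquivocationHandler` should be treated as illustrative:

```rust
// Report validity window: the whole bonding period, expressed in blocks
// (same formula as in the grandpa mock further down in this diff).
parameter_types! {
    pub const ReportLongevity: u64 =
        BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * Period::get();
}

// In the runtime's `pallet_grandpa::Config` impl the handler then reads
// (generic arguments written out; illustrative):
//
//     type HandleEquivocation =
//         EquivocationHandler<Self::KeyOwnerIdentification, Offences, ReportLongevity>;
```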
-impl frame_support::unsigned::ValidateUnsigned for Module { +impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { - if let Call::report_equivocation_unsigned(equivocation_proof, _) = call { + if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { // discard equivocation report not coming from the local node match source { TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } @@ -204,6 +216,11 @@ impl frame_support::unsigned::ValidateUnsigned for Module { } } + // check report staleness + is_known_offence::(equivocation_proof, key_owner_proof)?; + + let longevity = >::ReportLongevity::get(); + ValidTransaction::with_tag_prefix("GrandpaEquivocation") // We assign the maximum priority for any equivocation report. .priority(TransactionPriority::max_value()) @@ -213,6 +230,7 @@ impl frame_support::unsigned::ValidateUnsigned for Module { equivocation_proof.set_id(), equivocation_proof.round(), )) + .longevity(longevity) // We don't propagate this. This can never be included on a remote node. .propagate(false) .build() @@ -223,36 +241,42 @@ impl frame_support::unsigned::ValidateUnsigned for Module { fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { - // check the membership proof to extract the offender's id - let key = ( - sp_finality_grandpa::KEY_TYPE, - equivocation_proof.offender().clone(), - ); - - let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone()) - .ok_or(InvalidTransaction::BadProof)?; - - // check if the offence has already been reported, - // and if so then we can discard the report. - let time_slot = - >::Offence::new_time_slot( - equivocation_proof.set_id(), - equivocation_proof.round(), - ); - - let is_known_offence = T::HandleEquivocation::is_known_offence(&[offender], &time_slot); - - if is_known_offence { - Err(InvalidTransaction::Stale.into()) - } else { - Ok(()) - } + is_known_offence::(equivocation_proof, key_owner_proof) } else { Err(InvalidTransaction::Call.into()) } } } +fn is_known_offence( + equivocation_proof: &EquivocationProof, + key_owner_proof: &T::KeyOwnerProof, +) -> Result<(), TransactionValidityError> { + // check the membership proof to extract the offender's id + let key = ( + sp_finality_grandpa::KEY_TYPE, + equivocation_proof.offender().clone(), + ); + + let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone()) + .ok_or(InvalidTransaction::BadProof)?; + + // check if the offence has already been reported, + // and if so then we can discard the report. + let time_slot = >::Offence::new_time_slot( + equivocation_proof.set_id(), + equivocation_proof.round(), + ); + + let is_known_offence = T::HandleEquivocation::is_known_offence(&[offender], &time_slot); + + if is_known_offence { + Err(InvalidTransaction::Stale.into()) + } else { + Ok(()) + } +} + /// A grandpa equivocation offence report. #[allow(dead_code)] pub struct GrandpaEquivocationOffence { diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index fe836ac913cb6..b68624df7b5dc 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -41,7 +41,7 @@ use fg_primitives::{ }; use frame_support::{ decl_error, decl_event, decl_module, decl_storage, dispatch::DispatchResultWithPostInfo, - storage, traits::KeyOwnerProofSystem, weights::{Pays, Weight}, Parameter, + storage, traits::{OneSessionHandler, KeyOwnerProofSystem}, weights::{Pays, Weight}, Parameter, }; use frame_system::{ensure_none, ensure_root, ensure_signed}; use sp_runtime::{ @@ -67,9 +67,9 @@ pub use equivocation::{ HandleEquivocation, }; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The event type of this module. - type Event: From + Into<::Event>; + type Event: From + Into<::Event>; /// The function call. type Call: From>; @@ -188,7 +188,7 @@ decl_event! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Attempt to signal GRANDPA pause when the authority set isn't live /// (either paused or already pending pause). PauseFailed, @@ -209,7 +209,7 @@ decl_error! { } decl_storage! { - trait Store for Module as GrandpaFinality { + trait Store for Module as GrandpaFinality { /// State of the current authority set. State get(fn state): StoredState = StoredState::Live; @@ -241,7 +241,7 @@ decl_storage! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -372,7 +372,7 @@ decl_module! { } } -impl Module { +impl Module { /// Get the current set of authorities, along with their respective weights. pub fn grandpa_authorities() -> AuthorityList { storage::unhashed::get_or_default::(GRANDPA_AUTHORITIES_KEY).into() @@ -583,12 +583,12 @@ impl Module { } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = AuthorityId; } -impl pallet_session::OneSessionHandler for Module - where T: pallet_session::Trait +impl OneSessionHandler for Module + where T: pallet_session::Config { type Key = AuthorityId; diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index d3461eec12dc4..e8703dba50ae5 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,11 +19,11 @@ #![cfg(test)] -use crate::{AuthorityId, AuthorityList, ConsensusLog, Module, Trait}; +use crate::{AuthorityId, AuthorityList, ConsensusLog, Config, self as pallet_grandpa}; use ::grandpa as finality_grandpa; use codec::Encode; use frame_support::{ - impl_outer_dispatch, impl_outer_event, impl_outer_origin, parameter_types, + parameter_types, traits::{KeyOwnerProofSystem, OnFinalize, OnInitialize}, weights::Weight, }; @@ -40,17 +40,27 @@ use sp_runtime::{ DigestItem, Perbill, }; use sp_staking::SessionIndex; - -impl_outer_origin! { - pub enum Origin for Test {} -} - -impl_outer_dispatch! 
{ - pub enum Call for Test where origin: Origin { - pallet_grandpa::Grandpa, - pallet_staking::Staking, +use pallet_session::historical as pallet_session_historical; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, + Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event, ValidateUnsigned}, + Offences: pallet_offences::{Module, Call, Storage, Event}, + Historical: pallet_session_historical::{Module}, } -} +); impl_opaque_keys! { pub struct TestSessionKeys { @@ -58,29 +68,17 @@ impl_opaque_keys! { } } -impl_outer_event! { - pub enum TestEvent for Test { - frame_system, - pallet_balances, - pallet_grandpa, - pallet_offences, - pallet_session, - pallet_staking, - } -} - -#[derive(Clone, Eq, PartialEq)] -pub struct Test; - parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -90,21 +88,15 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } impl frame_system::offchain::SendTransactionTypes for Test @@ -122,8 +114,8 @@ parameter_types! { } /// Custom `SessionHandler` since we use `TestSessionKeys` as `Keys`. -impl pallet_session::Trait for Test { - type Event = TestEvent; +impl pallet_session::Config for Test { + type Event = Event; type ValidatorId = u64; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = pallet_session::PeriodicSessions; @@ -135,7 +127,7 @@ impl pallet_session::Trait for Test { type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -144,7 +136,7 @@ parameter_types! 
{ pub const UncleGenerations: u64 = 0; } -impl pallet_authorship::Trait for Test { +impl pallet_authorship::Config for Test { type FindAuthor = (); type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -155,11 +147,11 @@ parameter_types! { pub const ExistentialDeposit: u128 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u128; type DustRemoval = (); - type Event = TestEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -169,7 +161,7 @@ parameter_types! { pub const MinimumPeriod: u64 = 3; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -198,10 +190,10 @@ parameter_types! { pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; } -impl pallet_staking::Trait for Test { +impl pallet_staking::Config for Test { type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; - type Event = TestEvent; + type Event = Event; type Currency = Balances; type Slash = (); type Reward = (); @@ -224,18 +216,23 @@ impl pallet_staking::Trait for Test { } parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * BlockWeights::get().max_block; } -impl pallet_offences::Trait for Test { - type Event = TestEvent; +impl pallet_offences::Config for Test { + type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; } -impl Trait for Test { - type Event = TestEvent; +parameter_types! { + pub const ReportLongevity: u64 = + BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * Period::get(); +} + +impl Config for Test { + type Event = Event; type Call = Call; type KeyOwnerProofSystem = Historical; @@ -248,24 +245,12 @@ impl Trait for Test { AuthorityId, )>>::IdentificationTuple; - type HandleEquivocation = super::EquivocationHandler; + type HandleEquivocation = + super::EquivocationHandler; type WeightInfo = (); } -mod pallet_grandpa { - pub use crate::Event; -} - -pub type Balances = pallet_balances::Module; -pub type Historical = pallet_session::historical::Module; -pub type Offences = pallet_offences::Module; -pub type Session = pallet_session::Module; -pub type Staking = pallet_staking::Module; -pub type System = frame_system::Module; -pub type Timestamp = pallet_timestamp::Module; -pub type Grandpa = Module; - pub fn grandpa_log(log: ConsensusLog) -> DigestItem { DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()) } @@ -291,6 +276,14 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx .build_storage::() .unwrap(); + let balances: Vec<_> = (0..authorities.len()) + .map(|i| (i as u64, 10_000_000)) + .collect(); + + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut t) + .unwrap(); + // stashes are the index. 
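Taken together, the changes in this mock follow the general FRAME 3.0 migration pattern seen throughout the patch: rename the configuration trait from `Trait` to `Config`, and let `construct_runtime!` plus the `frame_system::mocking` helpers generate the outer `Origin`, `Call`, `Event`, and `PalletInfo` types that the old `impl_outer_*` macros and hand-written module aliases used to provide. A minimal sketch of that pattern for a hypothetical `pallet_example` (the names are illustrative, not part of this patch):

    // Pallet side: the trait is renamed and re-bounded on `frame_system::Config`.
    pub trait Config: frame_system::Config {
        /// The overarching event type.
        type Event: From<Event<Self>> + Into<<Self as frame_system::Config>::Event>;
    }

    // Mock side: `Test`, `Origin` and the aggregate `Event` now come out of
    // `construct_runtime!`, so the implementation block is renamed to match
    // and points at the generated event type.
    impl pallet_example::Config for Test {
        type Event = Event;
    }
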
let session_keys: Vec<_> = authorities .iter() @@ -306,6 +299,12 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx }) .collect(); + // NOTE: this will initialize the grandpa authorities + // through OneSessionHandler::on_genesis_session + pallet_session::GenesisConfig:: { keys: session_keys } + .assimilate_storage(&mut t) + .unwrap(); + // controllers are the index + 1000 let stakers: Vec<_> = (0..authorities.len()) .map(|i| { @@ -318,20 +317,6 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx }) .collect(); - let balances: Vec<_> = (0..authorities.len()) - .map(|i| (i as u64, 10_000_000)) - .collect(); - - // NOTE: this will initialize the grandpa authorities - // through OneSessionHandler::on_genesis_session - pallet_session::GenesisConfig:: { keys: session_keys } - .assimilate_storage(&mut t) - .unwrap(); - - pallet_balances::GenesisConfig:: { balances } - .assimilate_storage(&mut t) - .unwrap(); - let staking_config = pallet_staking::GenesisConfig:: { stakers, validator_count: 8, @@ -364,7 +349,6 @@ pub fn start_session(session_index: SessionIndex) { &(i as u64 + 1), &parent_hash, &Default::default(), - &Default::default(), Default::default(), ); System::set_block_number((i + 1).into()); @@ -389,7 +373,6 @@ pub fn initialize_block(number: u64, parent_hash: H256) { &number, &parent_hash, &Default::default(), - &Default::default(), Default::default(), ); } diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 4916808fe000f..50462d33472a9 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,17 +19,16 @@ #![cfg(test)] -use super::{Call, *}; +use super::{Call, Event, *}; use crate::mock::*; use codec::{Decode, Encode}; use fg_primitives::ScheduledChange; use frame_support::{ assert_err, assert_ok, - traits::{Currency, OnFinalize}, + traits::{Currency, OnFinalize, OneSessionHandler}, weights::{GetDispatchInfo, Pays}, }; use frame_system::{EventRecord, Phase}; -use pallet_session::OneSessionHandler; use sp_core::H256; use sp_keyring::Ed25519Keyring; use sp_runtime::testing::Digest; @@ -707,8 +706,8 @@ fn report_equivocation_invalid_equivocation_proof() { #[test] fn report_equivocation_validate_unsigned_prevents_duplicates() { use sp_runtime::transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, - TransactionValidity, ValidTransaction, + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + ValidTransaction, }; let authorities = test_authorities(); @@ -763,7 +762,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { priority: TransactionPriority::max_value(), requires: vec![], provides: vec![("GrandpaEquivocation", tx_tag).encode()], - longevity: TransactionLongevity::max_value(), + longevity: ReportLongevity::get(), propagate: false, }) ); @@ -776,6 +775,15 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { .unwrap(); // the report should now be considered stale and the transaction is invalid + // the check for staleness should be done on both `validate_unsigned` and on `pre_dispatch` + assert_err!( + ::validate_unsigned( + TransactionSource::Local, + &call, + ), + InvalidTransaction::Stale, + ); + assert_err!( ::pre_dispatch(&call), InvalidTransaction::Stale, @@ -850,7 +858,7 @@ fn report_equivocation_has_valid_weight() { // but there's a lower bound of 100 validators. assert!( (1..=100) - .map(::WeightInfo::report_equivocation) + .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) .all(|w| w[0] == w[1]) @@ -860,7 +868,7 @@ fn report_equivocation_has_valid_weight() { // with every extra validator. 
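The flatness assertion above, together with the strictly-increasing check that follows, encodes a simple shape test on the benchmarked weight function: constant up to the 100-validator floor, then growing with every extra validator. The same idea in a self-contained form, where `report_equivocation_weight` is only a stand-in for the generated `WeightInfo::report_equivocation`:

    // Stand-in for the benchmarked weight: flat up to the 100-validator floor,
    // then growing linearly with the validator count.
    fn report_equivocation_weight(validators: u64) -> u64 {
        1_000 * validators.max(100)
    }

    // Constant in the lower range...
    assert!(
        (1u64..=100)
            .map(report_equivocation_weight)
            .collect::<Vec<_>>()
            .windows(2)
            .all(|w| w[0] == w[1])
    );

    // ...strictly increasing once every extra validator adds cost.
    assert!(
        (100u64..=1000)
            .map(report_equivocation_weight)
            .collect::<Vec<_>>()
            .windows(2)
            .all(|w| w[0] < w[1])
    );
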
assert!( (100..=1000) - .map(::WeightInfo::report_equivocation) + .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) .all(|w| w[0] < w[1]) diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 08777c44ad2b1..3fd0c30a0f83b 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-identity" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/identity/README.md b/frame/identity/README.md index 8927febec6bbd..38e16d4dd4902 100644 --- a/frame/identity/README.md +++ b/frame/identity/README.md @@ -51,6 +51,6 @@ no state-bloat attack is viable. * `kill_identity` - Forcibly remove the associated identity; the deposit is lost. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index d7876514452e2..e916bdfa50461 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,16 +29,16 @@ use crate::Module as Identity; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::Event) { let events = frame_system::Module::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } // Adds `r` registrars to the Identity Pallet. These registrars will have set fees and fields. -fn add_registrars(r: u32) -> Result<(), &'static str> { +fn add_registrars(r: u32) -> Result<(), &'static str> { for i in 0..r { let registrar: T::AccountId = account("registrar", i, SEED); let _ = T::Currency::make_free_balance_be(®istrar, BalanceOf::::max_value()); @@ -57,7 +57,7 @@ fn add_registrars(r: u32) -> Result<(), &'static str> { // Create `s` sub-accounts for the identity of `who` and return them. // Each will have 32 bytes of raw data added to it. -fn create_sub_accounts(who: &T::AccountId, s: u32) -> Result, &'static str> { +fn create_sub_accounts(who: &T::AccountId, s: u32) -> Result, &'static str> { let mut subs = Vec::new(); let who_origin = RawOrigin::Signed(who.clone()); let data = Data::Raw(vec![0; 32]); @@ -77,7 +77,7 @@ fn create_sub_accounts(who: &T::AccountId, s: u32) -> Result(who: &T::AccountId, s: u32) -> Result, &'static str> { +fn add_sub_accounts(who: &T::AccountId, s: u32) -> Result, &'static str> { let who_origin = RawOrigin::Signed(who.clone()); let subs = create_sub_accounts::(who, s)?; @@ -88,7 +88,7 @@ fn add_sub_accounts(who: &T::AccountId, s: u32) -> Result(num_fields: u32) -> IdentityInfo { +fn create_identity_info(num_fields: u32) -> IdentityInfo { let data = Data::Raw(vec![0; 32]); let info = IdentityInfo { @@ -107,25 +107,6 @@ fn create_identity_info(num_fields: u32) -> IdentityInfo { } benchmarks! { - // These are the common parameters along with their instancing. - _ { - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - // extra parameter for the set_subs bench for previous sub accounts - let p in 1 .. T::MaxSubAccounts::get() => (); - let s in 1 .. T::MaxSubAccounts::get() => { - // Give them s many sub accounts - let caller: T::AccountId = whitelisted_caller(); - let _ = add_sub_accounts::(&caller, s)?; - }; - let x in 1 .. T::MaxAdditionalFields::get() => { - // Create their main identity with x additional fields - let info = create_identity_info::(x); - let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); - Identity::::set_identity(caller_origin, info)?; - }; - } - add_registrar { let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; ensure!(Registrars::::get().len() as u32 == r, "Registrars not set up correctly."); @@ -135,15 +116,13 @@ benchmarks! { } set_identity { - let r in ...; - // This X doesn't affect the caller ID up front like with the others, so we don't use the - // standard preparation. - let x in _ .. _ => (); + let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; + let x in 1 .. 
T::MaxAdditionalFields::get(); let caller = { // The target user let caller: T::AccountId = whitelisted_caller(); let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); + let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Add an initial identity @@ -200,13 +179,23 @@ benchmarks! { clear_identity { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); let caller_lookup = ::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in ...; - let s in ...; - let x in ...; + let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; + let s in 1 .. T::MaxSubAccounts::get() => { + // Give them s many sub accounts + let caller: T::AccountId = whitelisted_caller(); + let _ = add_sub_accounts::(&caller, s)?; + }; + let x in 1 .. T::MaxAdditionalFields::get() => { + // Create their main identity with x additional fields + let info = create_identity_info::(x); + let caller: T::AccountId = whitelisted_caller(); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + Identity::::set_identity(caller_origin, info)?; + }; // User requests judgement from all the registrars, and they approve for i in 0..r { @@ -228,8 +217,14 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in ...; - let x in ...; + let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; + let x in 1 .. T::MaxAdditionalFields::get() => { + // Create their main identity with x additional fields + let info = create_identity_info::(x); + let caller: T::AccountId = whitelisted_caller(); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + Identity::::set_identity(caller_origin, info)?; + }; }: _(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into()) verify { assert_last_event::(Event::::JudgementRequested(caller, r-1).into()); @@ -237,11 +232,17 @@ benchmarks! { cancel_request { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in ...; - let x in ...; + let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; + let x in 1 .. T::MaxAdditionalFields::get() => { + // Create their main identity with x additional fields + let info = create_identity_info::(x); + let caller: T::AccountId = whitelisted_caller(); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + Identity::::set_identity(caller_origin, info)?; + }; Identity::::request_judgement(caller_origin, r - 1, 10u32.into())?; }: _(RawOrigin::Signed(caller.clone()), r - 1) @@ -300,7 +301,7 @@ benchmarks! { provide_judgement { // The user let user: T::AccountId = account("user", r, SEED); - let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); + let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); let user_lookup = ::unlookup(user.clone()); let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); @@ -308,8 +309,7 @@ benchmarks! 
{ let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - // For this x, it's the user identity that gts the fields, not the caller. - let x in _ .. _ => { + let x in 1 .. T::MaxAdditionalFields::get() => { let info = create_identity_info::(x); Identity::::set_identity(user_origin.clone(), info)?; }; @@ -322,13 +322,12 @@ benchmarks! { } kill_identity { - let r in ...; - // Setting up our own account below. - let s in _ .. _ => {}; - let x in _ .. _ => {}; + let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; + let s in 1 .. T::MaxSubAccounts::get(); + let x in 1 .. T::MaxAdditionalFields::get(); let target: T::AccountId = account("target", 0, SEED); - let target_origin: ::Origin = RawOrigin::Signed(target.clone()).into(); + let target_origin: ::Origin = RawOrigin::Signed(target.clone()).into(); let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 0ee6563a5611d..fed32afa2e62f 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ //! # Identity Module //! -//! - [`identity::Trait`](./trait.Trait.html) +//! - [`identity::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -68,7 +68,7 @@ //! * `kill_identity` - Forcibly remove the associated identity; the deposit is lost. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html #![cfg_attr(not(feature = "std"), no_std)] @@ -91,12 +91,12 @@ use frame_support::{ use frame_system::ensure_signed; pub use weights::WeightInfo; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The currency trait. type Currency: ReservableCurrency; @@ -399,7 +399,7 @@ pub struct RegistrarInfo< } decl_storage! { - trait Store for Module as Identity { + trait Store for Module as Identity { /// Information that is pertinent to identify the entity behind an account. /// /// TWOX-NOTE: OK ― `AccountId` is a secure hash. @@ -428,7 +428,7 @@ decl_storage! { } decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { + pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { /// A name was set or reset (which will remove all judgements). \[who\] IdentitySet(AccountId), /// A name was cleared, and the given balance returned. \[who, deposit\] @@ -456,7 +456,7 @@ decl_event!( decl_error! { /// Error for the identity module. - pub enum Error for Module { + pub enum Error for Module { /// Too many subs-accounts. TooManySubAccounts, /// Account isn't found. @@ -494,7 +494,7 @@ decl_error! { decl_module! { /// Identity module declaration. 
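For reference, the balance and imbalance aliases touched above follow the usual fully-qualified pattern against `frame_support::traits::Currency`; spelled out here as a sketch, since the nested generics are easy to misread:

    type BalanceOf<T> =
        <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
    type NegativeImbalanceOf<T> =
        <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::NegativeImbalance;
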
- pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// The amount held on deposit for a registered identity. const BasicDeposit: BalanceOf = T::BasicDeposit::get(); @@ -1125,7 +1125,7 @@ decl_module! { } } -impl Module { +impl Module { /// Get the subs of an account. pub fn subs(who: &T::AccountId) -> Vec<(T::AccountId, Data)> { SubsOf::::get(who).1 @@ -1134,3 +1134,4 @@ impl Module { .collect() } } + diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 0637ac6aafc5f..230079a21ea0d 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,63 +18,66 @@ // Tests for Identity Pallet use super::*; +use crate as pallet_identity; use sp_runtime::traits::BadOrigin; -use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - ord_parameter_types, -}; +use frame_support::{assert_ok, assert_noop, parameter_types, ord_parameter_types}; use sp_core::H256; use frame_system::{EnsureSignedBy, EnsureOneOf, EnsureRoot}; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, + testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Identity: pallet_identity::{Module, Call, Storage, Event}, + } +); -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type Balance = u64; - type Event = (); + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -103,8 +106,8 @@ type EnsureTwoOrRoot = EnsureOneOf< EnsureRoot, EnsureSignedBy >; -impl Trait for Test { - type Event = (); +impl Config for Test { + type Event = Event; type Currency = Balances; type Slashed = (); type BasicDeposit = BasicDeposit; @@ -117,9 +120,6 @@ impl Trait for Test { type ForceOrigin = EnsureTwoOrRoot; type WeightInfo = (); } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Identity = Module; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs index 44efbb31035ef..1026e8f73f85c 100644 --- a/frame/identity/src/weights.rs +++ b/frame/identity/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -64,7 +64,7 @@ pub trait WeightInfo { /// Weights for pallet_identity using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn add_registrar(r: u32, ) -> Weight { (28_965_000 as Weight) .saturating_add((421_000 as Weight).saturating_mul(r as Weight)) diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index ef22d67688732..bde041c43764a 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-im-online" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,23 +13,25 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } +pallet-authorship = { version = "3.0.0", default-features = false, path = "../authorship" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = 
"../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] +pallet-session = { version = "3.0.0", path = "../session" } [features] -default = ["std", "pallet-session/historical"] +default = ["std"] std = [ "sp-application-crypto/std", "pallet-authorship/std", @@ -37,7 +39,6 @@ std = [ "sp-core/std", "sp-std/std", "serde", - "pallet-session/std", "sp-io/std", "sp-runtime/std", "sp-staking/std", diff --git a/frame/im-online/README.md b/frame/im-online/README.md index 9a65bb6a98086..a2ed5edc906a2 100644 --- a/frame/im-online/README.md +++ b/frame/im-online/README.md @@ -30,10 +30,10 @@ use frame_support::{decl_module, dispatch}; use frame_system::ensure_signed; use pallet_im_online::{self as im_online}; -pub trait Trait: im_online::Trait {} +pub trait Config: im_online::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn is_online(origin, authority_index: u32) -> dispatch::DispatchResult { let _sender = ensure_signed(origin)?; @@ -48,4 +48,4 @@ decl_module! { This module depends on the [Session module](https://docs.rs/pallet-session/latest/pallet_session/). -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/im-online/src/benchmarking.rs b/frame/im-online/src/benchmarking.rs index b92be023ce480..ef7f66307a99d 100644 --- a/frame/im-online/src/benchmarking.rs +++ b/frame/im-online/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,7 +34,7 @@ use crate::Module as ImOnline; const MAX_KEYS: u32 = 1000; const MAX_EXTERNAL_ADDRESSES: u32 = 100; -pub fn create_heartbeat(k: u32, e: u32) -> +pub fn create_heartbeat(k: u32, e: u32) -> Result<(crate::Heartbeat, ::Signature), &'static str> { let mut keys = Vec::new(); @@ -63,8 +63,6 @@ pub fn create_heartbeat(k: u32, e: u32) -> } benchmarks! { - _{ } - #[extra] heartbeat { let k in 1 .. MAX_KEYS; diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 2d3693d127207..bd597acfb1ed5 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,7 +30,7 @@ //! as the [NetworkState](../../client/offchain/struct.NetworkState.html). //! 
It is submitted as an Unsigned Transaction via off-chain workers. //! -//! - [`im_online::Trait`](./trait.Trait.html) +//! - [`im_online::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -47,10 +47,10 @@ //! use frame_system::ensure_signed; //! use pallet_im_online::{self as im_online}; //! -//! pub trait Trait: im_online::Trait {} +//! pub trait Config: im_online::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn is_online(origin, authority_index: u32) -> dispatch::DispatchResult { //! let _sender = ensure_signed(origin)?; @@ -79,7 +79,6 @@ use codec::{Encode, Decode}; use sp_core::offchain::OpaqueNetworkState; use sp_std::prelude::*; use sp_std::convert::TryInto; -use pallet_session::historical::IdentificationTuple; use sp_runtime::{ offchain::storage::StorageValueRef, RuntimeDebug, @@ -95,7 +94,7 @@ use sp_staking::{ }; use frame_support::{ decl_module, decl_event, decl_storage, Parameter, debug, decl_error, - traits::Get, + traits::{Get, ValidatorSet, ValidatorSetWithIdentification, OneSessionHandler}, }; use frame_system::ensure_none; use frame_system::offchain::{ @@ -227,12 +226,24 @@ pub struct Heartbeat pub validators_len: u32, } -pub trait Trait: SendTransactionTypes> + pallet_session::historical::Trait { +/// A type for representing the validator id in a session. +pub type ValidatorId = < + ::ValidatorSet as ValidatorSet<::AccountId> +>::ValidatorId; + +/// A tuple of (ValidatorId, Identification) where `Identification` is the full identification of `ValidatorId`. +pub type IdentificationTuple = ( + ValidatorId, + <::ValidatorSet as + ValidatorSetWithIdentification<::AccountId>>::Identification, +); + +pub trait Config: SendTransactionTypes> + frame_system::Config { /// The identifier type for an authority. type AuthorityId: Member + Parameter + RuntimeAppPublic + Default + Ord; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// An expected duration of the session. /// @@ -242,6 +253,9 @@ pub trait Trait: SendTransactionTypes> + pallet_session::historical:: /// there is a chance the authority will produce a block and they won't be necessary. type SessionDuration: Get; + /// A type for retrieving the validators supposed to be online in a session. + type ValidatorSet: ValidatorSetWithIdentification; + /// A type that gives us the ability to submit unresponsiveness offence reports. type ReportUnresponsiveness: ReportOffence< @@ -262,7 +276,7 @@ pub trait Trait: SendTransactionTypes> + pallet_session::historical:: decl_event!( pub enum Event where - ::AuthorityId, + ::AuthorityId, IdentificationTuple = IdentificationTuple, { /// A new heartbeat was received from `AuthorityId` \[authority_id\] @@ -275,7 +289,7 @@ decl_event!( ); decl_storage! { - trait Store for Module as ImOnline { + trait Store for Module as ImOnline { /// The block number after which it's ok to send heartbeats in current session. /// /// At the beginning of each session we set this to a value that should @@ -293,10 +307,10 @@ decl_storage! { double_map hasher(twox_64_concat) SessionIndex, hasher(twox_64_concat) AuthIndex => Option>; - /// For each session index, we keep a mapping of `T::ValidatorId` to the + /// For each session index, we keep a mapping of `ValidatorId` to the /// number of blocks authored by the given authority. 
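The new `ValidatorId` and `IdentificationTuple` aliases above are just projections of associated types on the `ValidatorSet`/`ValidatorSetWithIdentification` traits, which is what lets the pallet drop its hard dependency on `pallet-session`. A stripped-down, hypothetical model of that shape (the real traits live in `frame_support::traits` and additionally carry validator-id and identification conversion types):

    trait ValidatorSet<AccountId> {
        type ValidatorId;
        fn session_index() -> u32;
        fn validators() -> Vec<Self::ValidatorId>;
    }

    trait ValidatorSetWithIdentification<AccountId>: ValidatorSet<AccountId> {
        type Identification;
    }

    // "Who misbehaved, and with what full identification" is then expressed
    // purely through the abstraction, without naming pallet-session:
    type IdentificationTuple<AccountId, V> = (
        <V as ValidatorSet<AccountId>>::ValidatorId,
        <V as ValidatorSetWithIdentification<AccountId>>::Identification,
    );
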
AuthoredBlocks get(fn authored_blocks): - double_map hasher(twox_64_concat) SessionIndex, hasher(twox_64_concat) T::ValidatorId + double_map hasher(twox_64_concat) SessionIndex, hasher(twox_64_concat) ValidatorId => u32; } add_extra_genesis { @@ -307,7 +321,7 @@ decl_storage! { decl_error! { /// Error for the im-online module. - pub enum Error for Module { + pub enum Error for Module { /// Non existent public key. InvalidKey, /// Duplicated heartbeat. @@ -316,7 +330,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -332,7 +346,7 @@ decl_module! { /// #
// NOTE: the weight includes the cost of validate_unsigned as it is part of the cost to // import block with such an extrinsic. - #[weight = ::WeightInfo::validate_unsigned_and_then_heartbeat( + #[weight = ::WeightInfo::validate_unsigned_and_then_heartbeat( heartbeat.validators_len as u32, heartbeat.network_state.external_addresses.len() as u32, )] @@ -345,7 +359,7 @@ decl_module! { ) { ensure_none(origin)?; - let current_session = >::current_index(); + let current_session = T::ValidatorSet::session_index(); let exists = ::contains_key( ¤t_session, &heartbeat.authority_index @@ -393,27 +407,30 @@ decl_module! { } } -type OffchainResult = Result::BlockNumber>>; +type OffchainResult = Result::BlockNumber>>; /// Keep track of number of authored blocks per authority, uncles are counted as /// well since they're a valid proof of being online. -impl pallet_authorship::EventHandler for Module { - fn note_author(author: T::ValidatorId) { +impl< + T: Config + pallet_authorship::Config, +> pallet_authorship::EventHandler, T::BlockNumber> for Module +{ + fn note_author(author: ValidatorId) { Self::note_authorship(author); } - fn note_uncle(author: T::ValidatorId, _age: T::BlockNumber) { + fn note_uncle(author: ValidatorId, _age: T::BlockNumber) { Self::note_authorship(author); } } -impl Module { +impl Module { /// Returns `true` if a heartbeat has been received for the authority at /// `authority_index` in the authorities series or if the authority has /// authored at least one block, during the current session. Otherwise /// `false`. pub fn is_online(authority_index: AuthIndex) -> bool { - let current_validators = >::validators(); + let current_validators = T::ValidatorSet::validators(); if authority_index >= current_validators.len() as u32 { return false; @@ -424,8 +441,8 @@ impl Module { Self::is_online_aux(authority_index, authority) } - fn is_online_aux(authority_index: AuthIndex, authority: &T::ValidatorId) -> bool { - let current_session = >::current_index(); + fn is_online_aux(authority_index: AuthIndex, authority: &ValidatorId) -> bool { + let current_session = T::ValidatorSet::session_index(); ::contains_key(¤t_session, &authority_index) || >::get( @@ -437,13 +454,13 @@ impl Module { /// Returns `true` if a heartbeat has been received for the authority at `authority_index` in /// the authorities series, during the current session. Otherwise `false`. pub fn received_heartbeat_in_current_session(authority_index: AuthIndex) -> bool { - let current_session = >::current_index(); + let current_session = T::ValidatorSet::session_index(); ::contains_key(¤t_session, &authority_index) } /// Note that the given authority has authored a block in the current session. 
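Concretely, the liveness rule implemented by `is_online_aux` is: a heartbeat was received for the authority in the current session, or the validator authored at least one block. A self-contained sketch with hypothetical in-memory maps standing in for the two storage double-maps:

    use std::collections::HashMap;

    /// Heartbeats received, keyed by (session index, authority index).
    type ReceivedHeartbeats = HashMap<(u32, u32), ()>;
    /// Blocks authored, keyed by (session index, validator id).
    type AuthoredBlocks = HashMap<(u32, u64), u32>;

    /// Online if a heartbeat arrived this session or at least one block was authored.
    fn is_online_aux(
        heartbeats: &ReceivedHeartbeats,
        authored: &AuthoredBlocks,
        session: u32,
        authority_index: u32,
        validator: u64,
    ) -> bool {
        heartbeats.contains_key(&(session, authority_index))
            || authored.get(&(session, validator)).copied().unwrap_or(0) != 0
    }
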
- fn note_authorship(author: T::ValidatorId) { - let current_session = >::current_index(); + fn note_authorship(author: ValidatorId) { + let current_session = T::ValidatorSet::session_index(); >::mutate( ¤t_session, @@ -460,8 +477,8 @@ impl Module { return Err(OffchainErr::TooEarly(heartbeat_after)) } - let session_index = >::current_index(); - let validators_len = >::validators().len() as u32; + let session_index = T::ValidatorSet::session_index(); + let validators_len = Keys::::decode_len().unwrap_or_default() as u32; Ok(Self::local_authority_keys() .map(move |(authority_index, key)| @@ -610,11 +627,11 @@ impl Module { } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = T::AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl OneSessionHandler for Module { type Key = T::AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) @@ -639,22 +656,24 @@ impl pallet_session::OneSessionHandler for Module { } fn on_before_session_ending() { - let session_index = >::current_index(); + let session_index = T::ValidatorSet::session_index(); let keys = Keys::::get(); - let current_validators = >::validators(); + let current_validators = T::ValidatorSet::validators(); let offenders = current_validators.into_iter().enumerate() .filter(|(index, id)| !Self::is_online_aux(*index as u32, id) ).filter_map(|(_, id)| - T::FullIdentificationOf::convert(id.clone()).map(|full_id| (id, full_id)) + >::IdentificationOf::convert( + id.clone() + ).map(|full_id| (id, full_id)) ).collect::>>(); // Remove all received heartbeats and number of authored blocks from the // current session, they have already been processed and won't be needed // anymore. - ::remove_prefix(&>::current_index()); - >::remove_prefix(&>::current_index()); + ::remove_prefix(&T::ValidatorSet::session_index()); + >::remove_prefix(&T::ValidatorSet::session_index()); if offenders.is_empty() { Self::deposit_event(RawEvent::AllGood); @@ -677,7 +696,7 @@ impl pallet_session::OneSessionHandler for Module { /// Invalid transaction custom error. Returned when validators_len field in heartbeat is incorrect. const INVALID_VALIDATORS_LEN: u8 = 10; -impl frame_support::unsigned::ValidateUnsigned for Module { +impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; fn validate_unsigned( @@ -691,7 +710,7 @@ impl frame_support::unsigned::ValidateUnsigned for Module { } // check if session index from heartbeat is recent - let current_session = >::current_index(); + let current_session = T::ValidatorSet::session_index(); if heartbeat.session_index != current_session { return InvalidTransaction::Stale.into(); } diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index dae4bb3447e56..1b80f5b12dedb 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,23 +21,31 @@ use std::cell::RefCell; -use crate::{Module, Trait}; +use crate::Config; use sp_runtime::Perbill; use sp_staking::{SessionIndex, offence::{ReportOffence, OffenceError}}; use sp_runtime::testing::{Header, UintAuthorityId, TestXt}; use sp_runtime::traits::{IdentityLookup, BlakeTwo256, ConvertInto}; use sp_core::H256; -use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types, weights::Weight}; - -impl_outer_origin!{ - pub enum Origin for Runtime {} -} - -impl_outer_dispatch! { - pub enum Call for Runtime where origin: Origin { - imonline::ImOnline, +use frame_support::parameter_types; +use crate as imonline; +use pallet_session::historical as pallet_session_historical; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, + ImOnline: imonline::{Module, Call, Storage, Config, Event}, + Historical: pallet_session_historical::{Module}, } -} +); thread_local! { pub static VALIDATORS: RefCell>> = RefCell::new(Some(vec![ @@ -99,18 +107,17 @@ pub fn new_test_ext() -> sp_io::TestExternalities { t.into() } -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Runtime; - parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -120,21 +127,15 @@ impl frame_system::Trait for Runtime { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { @@ -146,20 +147,20 @@ parameter_types! 
{ pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); } -impl pallet_session::Trait for Runtime { +impl pallet_session::Config for Runtime { type ShouldEndSession = pallet_session::PeriodicSessions; type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = (ImOnline, ); type ValidatorId = u64; type ValidatorIdOf = ConvertInto; type Keys = UintAuthorityId; - type Event = (); + type Event = Event; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type NextSessionRotation = pallet_session::PeriodicSessions; type WeightInfo = (); } -impl pallet_session::historical::Trait for Runtime { +impl pallet_session::historical::Config for Runtime { type FullIdentification = u64; type FullIdentificationOf = ConvertInto; } @@ -168,7 +169,7 @@ parameter_types! { pub const UncleGenerations: u32 = 5; } -impl pallet_authorship::Trait for Runtime { +impl pallet_authorship::Config for Runtime { type FindAuthor = (); type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -179,10 +180,11 @@ parameter_types! { pub const UnsignedPriority: u64 = 1 << 20; } -impl Trait for Runtime { +impl Config for Runtime { type AuthorityId = UintAuthorityId; - type Event = (); + type Event = Event; type ReportUnresponsiveness = OffenceHandler; + type ValidatorSet = Historical; type SessionDuration = Period; type UnsignedPriority = UnsignedPriority; type WeightInfo = (); @@ -195,11 +197,6 @@ impl frame_system::offchain::SendTransactionTypes for Runt type Extrinsic = Extrinsic; } -/// Im Online module. -pub type ImOnline = Module; -pub type System = frame_system::Module; -pub type Session = pallet_session::Module; - pub fn advance_session() { let now = System::block_number().max(1); System::set_block_number(now + 1); diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 22c6b4464c370..dc6fc4f37330e 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs index f9df679bd2bea..8f4140fc793a0 100644 --- a/frame/im-online/src/weights.rs +++ b/frame/im-online/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,7 +49,7 @@ pub trait WeightInfo { /// Weights for pallet_im_online using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { (114_379_000 as Weight) .saturating_add((219_000 as Weight).saturating_mul(k as Weight)) diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index aea8dbf1a866e..cde3cdeeecba5 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-indices" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,19 +14,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-keyring = { version = "3.0.0", optional = true, path = "../../primitives/keyring" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/indices/src/address.rs b/frame/indices/src/address.rs deleted file mode 100644 index 0fd8933381328..0000000000000 --- a/frame/indices/src/address.rs +++ /dev/null @@ -1,159 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Address type that is union of index and id for an account. - -#[cfg(feature = "std")] -use std::fmt; -use sp_std::convert::TryInto; -use crate::Member; -use codec::{Encode, Decode, Input, Output, Error}; - -/// An indices-aware address, which can be either a direct `AccountId` or -/// an index. -#[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Hash))] -pub enum Address where - AccountId: Member, - AccountIndex: Member, -{ - /// It's an account ID (pubkey). - Id(AccountId), - /// It's an account index. - Index(AccountIndex), -} - -#[cfg(feature = "std")] -impl fmt::Display for Address where - AccountId: Member, - AccountIndex: Member, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl From for Address where - AccountId: Member, - AccountIndex: Member, -{ - fn from(a: AccountId) -> Self { - Address::Id(a) - } -} - -fn need_more_than(a: T, b: T) -> Result { - if a < b { Ok(b) } else { Err("Invalid range".into()) } -} - -impl Decode for Address where - AccountId: Member + Decode, - AccountIndex: Member + Decode + PartialOrd + Ord + From + Copy, -{ - fn decode(input: &mut I) -> Result { - Ok(match input.read_byte()? { - x @ 0x00..=0xef => Address::Index(AccountIndex::from(x as u32)), - 0xfc => Address::Index(AccountIndex::from( - need_more_than(0xef, u16::decode(input)?)? as u32 - )), - 0xfd => Address::Index(AccountIndex::from( - need_more_than(0xffff, u32::decode(input)?)? - )), - 0xfe => Address::Index( - need_more_than(0xffffffffu32.into(), Decode::decode(input)?)? - ), - 0xff => Address::Id(Decode::decode(input)?), - _ => return Err("Invalid address variant".into()), - }) - } -} - -impl Encode for Address where - AccountId: Member + Encode, - AccountIndex: Member + Encode + PartialOrd + Ord + Copy + From + TryInto, -{ - fn encode_to(&self, dest: &mut T) { - match *self { - Address::Id(ref i) => { - dest.push_byte(255); - dest.push(i); - } - Address::Index(i) => { - let maybe_u32: Result = i.try_into(); - if let Ok(x) = maybe_u32 { - if x > 0xffff { - dest.push_byte(253); - dest.push(&x); - } - else if x >= 0xf0 { - dest.push_byte(252); - dest.push(&(x as u16)); - } - else { - dest.push_byte(x as u8); - } - - } else { - dest.push_byte(254); - dest.push(&i); - } - }, - } - } -} - -impl codec::EncodeLike for Address where - AccountId: Member + Encode, - AccountIndex: Member + Encode + PartialOrd + Ord + Copy + From + TryInto, -{} - -impl Default for Address where - AccountId: Member + Default, - AccountIndex: Member, -{ - fn default() -> Self { - Address::Id(Default::default()) - } -} - -#[cfg(test)] -mod tests { - use codec::{Encode, Decode}; - - type Address = super::Address<[u8; 8], u32>; - fn index(i: u32) -> Address { super::Address::Index(i) } - fn id(i: [u8; 8]) -> Address { super::Address::Id(i) } - - fn compare(a: Option
, d: &[u8]) { - if let Some(ref a) = a { - assert_eq!(d, &a.encode()[..]); - } - assert_eq!(Address::decode(&mut &d[..]).ok(), a); - } - - #[test] - fn it_should_work() { - compare(Some(index(2)), &[2][..]); - compare(None, &[240][..]); - compare(None, &[252, 239, 0][..]); - compare(Some(index(240)), &[252, 240, 0][..]); - compare(Some(index(304)), &[252, 48, 1][..]); - compare(None, &[253, 255, 255, 0, 0][..]); - compare(Some(index(0x10000)), &[253, 0, 0, 1, 0][..]); - compare(Some(id([42, 69, 42, 69, 42, 69, 42, 69])), &[255, 42, 69, 42, 69, 42, 69, 42, 69][..]); - } -} diff --git a/frame/indices/src/benchmarking.rs b/frame/indices/src/benchmarking.rs index 382bf07f1136e..f83e05ee9c627 100644 --- a/frame/indices/src/benchmarking.rs +++ b/frame/indices/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,8 +29,6 @@ use crate::Module as Indices; const SEED: u32 = 0; benchmarks! { - _ { } - claim { let account_index = T::AccountIndex::from(SEED); let caller: T::AccountId = whitelisted_caller(); diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index aa645d0cb9ebc..c925d3a0533e0 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,13 +21,13 @@ #![cfg_attr(not(feature = "std"), no_std)] mod mock; -pub mod address; mod tests; mod benchmarking; pub mod weights; use sp_std::prelude::*; use codec::Codec; +use sp_runtime::MultiAddress; use sp_runtime::traits::{ StaticLookup, Member, LookupError, Zero, Saturating, AtLeast32Bit }; @@ -35,14 +35,12 @@ use frame_support::{Parameter, decl_module, decl_error, decl_event, decl_storage use frame_support::dispatch::DispatchResult; use frame_support::traits::{Currency, ReservableCurrency, Get, BalanceStatus::Reserved}; use frame_system::{ensure_signed, ensure_root}; -use self::address::Address as RawAddress; pub use weights::WeightInfo; -pub type Address = RawAddress<::AccountId, ::AccountIndex>; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// The module's config trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// Type used for storing an account's index; implies the maximum number of accounts the system /// can hold. type AccountIndex: Parameter + Member + Codec + Default + AtLeast32Bit + Copy; @@ -54,14 +52,14 @@ pub trait Trait: frame_system::Trait { type Deposit: Get>; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } decl_storage! { - trait Store for Module as Indices { + trait Store for Module as Indices { /// The lookup from index to account. pub Accounts build(|config: &GenesisConfig| config.indices.iter() @@ -77,20 +75,20 @@ decl_storage! { decl_event!( pub enum Event where - ::AccountId, - ::AccountIndex + ::AccountId, + ::AccountIndex { - /// A account index was assigned. \[who, index\] + /// A account index was assigned. 
\[index, who\] IndexAssigned(AccountId, AccountIndex), /// A account index has been freed up (unassigned). \[index\] IndexFreed(AccountIndex), - /// A account index has been frozen to its current account ID. \[who, index\] + /// A account index has been frozen to its current account ID. \[index, who\] IndexFrozen(AccountIndex, AccountId), } ); decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// The index was not already assigned. NotAssigned, /// The index is assigned to another account. @@ -105,7 +103,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system = frame_system { + pub struct Module for enum Call where origin: T::Origin, system = frame_system { /// The deposit needed for reserving an index. const Deposit: BalanceOf = T::Deposit::get(); @@ -277,7 +275,7 @@ decl_module! { } } -impl Module { +impl Module { // PUBLIC IMMUTABLES /// Lookup an T::AccountIndex to get an Id, if there's one there. @@ -287,17 +285,18 @@ impl Module { /// Lookup an address to get an Id, if there's one there. pub fn lookup_address( - a: address::Address + a: MultiAddress ) -> Option { match a { - address::Address::Id(i) => Some(i), - address::Address::Index(i) => Self::lookup_index(i), + MultiAddress::Id(i) => Some(i), + MultiAddress::Index(i) => Self::lookup_index(i), + _ => None, } } } -impl StaticLookup for Module { - type Source = address::Address; +impl StaticLookup for Module { + type Source = MultiAddress; type Target = T::AccountId; fn lookup(a: Self::Source) -> Result { @@ -305,6 +304,6 @@ impl StaticLookup for Module { } fn unlookup(a: Self::Target) -> Self::Source { - address::Address::Id(a) + MultiAddress::Id(a) } } diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index cfbd2e38c3d3f..06c73b1a9bc27 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,39 +20,38 @@ #![cfg(test)] use sp_runtime::testing::Header; -use sp_runtime::Perbill; use sp_core::H256; -use frame_support::{impl_outer_origin, impl_outer_event, parameter_types, weights::Weight}; -use crate::{self as indices, Module, Trait}; -use frame_system as system; -use pallet_balances as balances; +use frame_support::parameter_types; +use crate::{self as pallet_indices, Config}; -impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} -} -impl_outer_event!{ - pub enum MetaEvent for Test { - system, - balances, - indices, - } -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Test; +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Indices: pallet_indices::{Module, Call, Storage, Config, Event}, + } +); parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; - type Call = (); + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -60,32 +59,26 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = Indices; type Header = Header; - type Event = MetaEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type DustRemoval = (); - type Event = MetaEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -95,11 +88,11 @@ parameter_types! { pub const Deposit: u64 = 1; } -impl Trait for Test { +impl Config for Test { type AccountIndex = u64; type Currency = Balances; type Deposit = Deposit; - type Event = MetaEvent; + type Event = Event; type WeightInfo = (); } @@ -110,7 +103,3 @@ pub fn new_test_ext() -> sp_io::TestExternalities { }.assimilate_storage(&mut t).unwrap(); t.into() } - -pub type System = frame_system::Module; -pub type Balances = pallet_balances::Module; -pub type Indices = Module; diff --git a/frame/indices/src/tests.rs b/frame/indices/src/tests.rs index e288871d55307..96b8c4acfcd2d 100644 --- a/frame/indices/src/tests.rs +++ b/frame/indices/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/indices/src/weights.rs b/frame/indices/src/weights.rs index 36d990cec52aa..6cc9593d20b90 100644 --- a/frame/indices/src/weights.rs +++ b/frame/indices/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -53,7 +53,7 @@ pub trait WeightInfo { /// Weights for pallet_indices using the Substrate node and recommended hardware. 
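The indices changes above replace the pallet's own `Address` type with `sp_runtime::MultiAddress`, and `lookup_address` now returns `None` for any variant other than `Id` and `Index`. A test-style sketch of the resulting behaviour, assuming the mock genesis (elided in this hunk) funds account 1 so the claim deposit can be reserved:

```rust
use frame_support::assert_ok;
use sp_runtime::MultiAddress;

#[test]
fn multiaddress_lookup_sketch() {
    new_test_ext().execute_with(|| {
        // Claim index 0 for account 1; this reserves the configured `Deposit`.
        assert_ok!(Indices::claim(Origin::signed(1), 0));
        // Both supported address forms resolve to the same account id.
        assert_eq!(Indices::lookup_address(MultiAddress::Id(1)), Some(1));
        assert_eq!(Indices::lookup_address(MultiAddress::Index(0)), Some(1));
        // Variants the pallet cannot resolve fall through to `None`.
        assert_eq!(Indices::lookup_address(MultiAddress::Address32([0; 32])), None);
    });
}
```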
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn claim() -> Weight { (53_799_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) diff --git a/frame/lottery/Cargo.toml b/frame/lottery/Cargo.toml new file mode 100644 index 0000000000000..05bb7e385f5dd --- /dev/null +++ b/frame/lottery/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "pallet-lottery" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME Participation Lottery Pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } + +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] +pallet-balances = { version = "3.0.0", path = "../balances" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +serde = { version = "1.0.101" } + +[features] +default = ["std"] +std = [ + "codec/std", + "sp-std/std", + "frame-support/std", + "sp-runtime/std", + "frame-system/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-system/runtime-benchmarks", + "frame-support/runtime-benchmarks", +] diff --git a/frame/lottery/src/benchmarking.rs b/frame/lottery/src/benchmarking.rs new file mode 100644 index 0000000000000..b9b0d7fd0002c --- /dev/null +++ b/frame/lottery/src/benchmarking.rs @@ -0,0 +1,190 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Lottery pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_system::RawOrigin; +use frame_support::traits::{OnInitialize, UnfilteredDispatchable}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use sp_runtime::traits::{Bounded, Zero}; + +use crate::Module as Lottery; + +// Set up and start a lottery +fn setup_lottery(repeat: bool) -> Result<(), &'static str> { + let price = T::Currency::minimum_balance(); + let length = 10u32.into(); + let delay = 5u32.into(); + // Calls will be maximum length... + let mut calls = vec![ + frame_system::Call::::set_code(vec![]).into(); + T::MaxCalls::get().saturating_sub(1) + ]; + // Last call will be the match for worst case scenario. 
+ calls.push(frame_system::Call::::remark(vec![]).into()); + let origin = T::ManagerOrigin::successful_origin(); + Lottery::::set_calls(origin.clone(), calls)?; + Lottery::::start_lottery(origin, price, length, delay, repeat)?; + Ok(()) +} + +benchmarks! { + buy_ticket { + let caller = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + setup_lottery::(false)?; + // force user to have a long vec of calls participating + let set_code_index: CallIndex = Lottery::::call_to_index( + &frame_system::Call::::set_code(vec![]).into() + )?; + let already_called: (u32, Vec) = ( + LotteryIndex::get(), + vec![ + set_code_index; + T::MaxCalls::get().saturating_sub(1) + ], + ); + Participants::::insert(&caller, already_called); + + let call = frame_system::Call::::remark(vec![]); + }: _(RawOrigin::Signed(caller), Box::new(call.into())) + verify { + assert_eq!(TicketsCount::get(), 1); + } + + set_calls { + let n in 0 .. T::MaxCalls::get() as u32; + let calls = vec![frame_system::Call::::remark(vec![]).into(); n as usize]; + + let call = Call::::set_calls(calls); + let origin = T::ManagerOrigin::successful_origin(); + assert!(CallIndices::get().is_empty()); + }: { call.dispatch_bypass_filter(origin)? } + verify { + if !n.is_zero() { + assert!(!CallIndices::get().is_empty()); + } + } + + start_lottery { + let price = BalanceOf::::max_value(); + let end = 10u32.into(); + let payout = 5u32.into(); + + let call = Call::::start_lottery(price, end, payout, true); + let origin = T::ManagerOrigin::successful_origin(); + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert!(crate::Lottery::::get().is_some()); + } + + stop_repeat { + setup_lottery::(true)?; + assert_eq!(crate::Lottery::::get().unwrap().repeat, true); + let call = Call::::stop_repeat(); + let origin = T::ManagerOrigin::successful_origin(); + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert_eq!(crate::Lottery::::get().unwrap().repeat, false); + } + + on_initialize_end { + setup_lottery::(false)?; + let winner = account("winner", 0, 0); + // User needs more than min balance to get ticket + T::Currency::make_free_balance_be(&winner, T::Currency::minimum_balance() * 10u32.into()); + // Make sure lottery account has at least min balance too + let lottery_account = Lottery::::account_id(); + T::Currency::make_free_balance_be(&lottery_account, T::Currency::minimum_balance() * 10u32.into()); + // Buy a ticket + let call = frame_system::Call::::remark(vec![]); + Lottery::::buy_ticket(RawOrigin::Signed(winner.clone()).into(), Box::new(call.into()))?; + // Kill user account for worst case + T::Currency::make_free_balance_be(&winner, 0u32.into()); + // Assert that lotto is set up for winner + assert_eq!(TicketsCount::get(), 1); + assert!(!Lottery::::pot().1.is_zero()); + }: { + // Generate `MaxGenerateRandom` numbers for worst case scenario + for i in 0 .. 
T::MaxGenerateRandom::get() { + Lottery::::generate_random_number(i); + } + // Start lottery has block 15 configured for payout + Lottery::::on_initialize(15u32.into()); + } + verify { + assert!(crate::Lottery::::get().is_none()); + assert_eq!(TicketsCount::get(), 0); + assert_eq!(Lottery::::pot().1, 0u32.into()); + assert!(!T::Currency::free_balance(&winner).is_zero()) + } + + on_initialize_repeat { + setup_lottery::(true)?; + let winner = account("winner", 0, 0); + // User needs more than min balance to get ticket + T::Currency::make_free_balance_be(&winner, T::Currency::minimum_balance() * 10u32.into()); + // Make sure lottery account has at least min balance too + let lottery_account = Lottery::::account_id(); + T::Currency::make_free_balance_be(&lottery_account, T::Currency::minimum_balance() * 10u32.into()); + // Buy a ticket + let call = frame_system::Call::::remark(vec![]); + Lottery::::buy_ticket(RawOrigin::Signed(winner.clone()).into(), Box::new(call.into()))?; + // Kill user account for worst case + T::Currency::make_free_balance_be(&winner, 0u32.into()); + // Assert that lotto is set up for winner + assert_eq!(TicketsCount::get(), 1); + assert!(!Lottery::::pot().1.is_zero()); + }: { + // Generate `MaxGenerateRandom` numbers for worst case scenario + for i in 0 .. T::MaxGenerateRandom::get() { + Lottery::::generate_random_number(i); + } + // Start lottery has block 15 configured for payout + Lottery::::on_initialize(15u32.into()); + } + verify { + assert!(crate::Lottery::::get().is_some()); + assert_eq!(LotteryIndex::get(), 2); + assert_eq!(TicketsCount::get(), 0); + assert_eq!(Lottery::::pot().1, 0u32.into()); + assert!(!T::Currency::free_balance(&winner).is_zero()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_buy_ticket::()); + assert_ok!(test_benchmark_set_calls::()); + assert_ok!(test_benchmark_start_lottery::()); + assert_ok!(test_benchmark_stop_repeat::()); + assert_ok!(test_benchmark_on_initialize_end::()); + assert_ok!(test_benchmark_on_initialize_repeat::()); + }); + } +} diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs new file mode 100644 index 0000000000000..11543d67b316b --- /dev/null +++ b/frame/lottery/src/lib.rs @@ -0,0 +1,452 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A lottery pallet that uses participation in the network to purchase tickets. +//! +//! With this pallet, you can configure a lottery, which is a pot of money that +//! users contribute to, and that is reallocated to a single user at the end of +//! the lottery period. Just like a normal lottery system, to participate, you +//! need to "buy a ticket", which is used to fund the pot. +//! +//! 
The unique feature of this lottery system is that tickets can only be +//! purchased by making a "valid call" dispatched through this pallet. +//! By configuring certain calls to be valid for the lottery, you can encourage +//! users to make those calls on your network. An example of how this could be +//! used is to set validator nominations as a valid lottery call. If the lottery +//! is set to repeat every month, then users would be encouraged to re-nominate +//! validators every month. A user can ony purchase one ticket per valid call +//! per lottery. +//! +//! This pallet can be configured to use dynamically set calls or statically set +//! calls. Call validation happens through the `ValidateCall` implementation. +//! This pallet provides one implementation of this using the `CallIndices` +//! storage item. You can also make your own implementation at the runtime level +//! which can contain much more complex logic, such as validation of the +//! parameters, which this pallet alone cannot do. +//! +//! This pallet uses the modulus operator to pick a random winner. It is known +//! that this might introduce a bias if the random number chosen in a range that +//! is not perfectly divisible by the total number of participants. The +//! `MaxGenerateRandom` configuration can help mitigate this by generating new +//! numbers until we hit the limit or we find a "fair" number. This is best +//! effort only. + +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; +mod benchmarking; +pub mod weights; + +use sp_std::prelude::*; +use sp_runtime::{ + DispatchError, ModuleId, + traits::{AccountIdConversion, Saturating, Zero}, +}; +use frame_support::{ + Parameter, decl_module, decl_error, decl_event, decl_storage, ensure, RuntimeDebug, + dispatch::{Dispatchable, DispatchResult, GetDispatchInfo}, + traits::{ + Currency, ReservableCurrency, Get, EnsureOrigin, ExistenceRequirement::KeepAlive, Randomness, + }, +}; +use frame_support::weights::Weight; +use frame_system::ensure_signed; +use codec::{Encode, Decode}; +pub use weights::WeightInfo; + +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; + +/// The module's config trait. +pub trait Config: frame_system::Config { + /// The Lottery's module id + type ModuleId: Get; + + /// A dispatchable call. + type Call: Parameter + Dispatchable + GetDispatchInfo + From>; + + /// The currency trait. + type Currency: ReservableCurrency; + + /// Something that provides randomness in the runtime. + type Randomness: Randomness; + + /// The overarching event type. + type Event: From> + Into<::Event>; + + /// The manager origin. + type ManagerOrigin: EnsureOrigin; + + /// The max number of calls available in a single lottery. + type MaxCalls: Get; + + /// Used to determine if a call would be valid for purchasing a ticket. + /// + /// Be conscious of the implementation used here. We assume at worst that + /// a vector of `MaxCalls` indices are queried for any call validation. + /// You may need to provide a custom benchmark if this assumption is broken. + type ValidateCall: ValidateCall; + + /// Number of time we should try to generate a random number that has no modulo bias. + /// The larger this number, the more potential computation is used for picking the winner, + /// but also the more likely that the chosen winner is done fairly. + type MaxGenerateRandom: Get; + + /// Weight information for extrinsics in this pallet. 
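As the pallet documentation above points out, call validation can also live in the runtime instead of the storage-backed `CallIndices` implementation. A minimal sketch of such a runtime-level validator; the `Runtime` name, the `Call::Balances` match, and the 1_000 cap are illustrative assumptions, not part of this diff:

```rust
/// Hypothetical runtime-side validator: only small balance transfers count as
/// lottery entries, everything else is rejected.
pub struct OnlySmallTransfers;

impl pallet_lottery::ValidateCall<Runtime> for OnlySmallTransfers {
    fn validate_call(call: &<Runtime as pallet_lottery::Config>::Call) -> bool {
        match call {
            Call::Balances(pallet_balances::Call::transfer(_, value)) => *value <= 1_000,
            _ => false,
        }
    }
}
```

The runtime would then set `type ValidateCall = OnlySmallTransfers;` in its `pallet_lottery::Config` implementation instead of pointing it at the pallet itself.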
+ type WeightInfo: WeightInfo; +} + +// Any runtime call can be encoded into two bytes which represent the pallet and call index. +// We use this to uniquely match someone's incoming call with the calls configured for the lottery. +type CallIndex = (u8, u8); + +#[derive(Encode, Decode, Default, Eq, PartialEq, RuntimeDebug)] +pub struct LotteryConfig { + /// Price per entry. + price: Balance, + /// Starting block of the lottery. + start: BlockNumber, + /// Length of the lottery (start + length = end). + length: BlockNumber, + /// Delay for choosing the winner of the lottery. (start + length + delay = payout). + /// Randomness in the "payout" block will be used to determine the winner. + delay: BlockNumber, + /// Whether this lottery will repeat after it completes. + repeat: bool, +} + +pub trait ValidateCall { + fn validate_call(call: &::Call) -> bool; +} + +impl ValidateCall for () { + fn validate_call(_: &::Call) -> bool { false } +} + +impl ValidateCall for Module { + fn validate_call(call: &::Call) -> bool { + let valid_calls = CallIndices::get(); + let call_index = match Self::call_to_index(&call) { + Ok(call_index) => call_index, + Err(_) => return false, + }; + valid_calls.iter().any(|c| call_index == *c) + } +} + +decl_storage! { + trait Store for Module as Lottery { + LotteryIndex: u32; + /// The configuration for the current lottery. + Lottery: Option>>; + /// Users who have purchased a ticket. (Lottery Index, Tickets Purchased) + Participants: map hasher(twox_64_concat) T::AccountId => (u32, Vec); + /// Total number of tickets sold. + TicketsCount: u32; + /// Each ticket's owner. + /// + /// May have residual storage from previous lotteries. Use `TicketsCount` to see which ones + /// are actually valid ticket mappings. + Tickets: map hasher(twox_64_concat) u32 => Option; + /// The calls stored in this pallet to be used in an active lottery if configured + /// by `Config::ValidateCall`. + CallIndices: Vec; + } +} + +decl_event!( + pub enum Event where + ::AccountId, + Balance = BalanceOf, + { + /// A lottery has been started! + LotteryStarted, + /// A new set of calls have been set! + CallsUpdated, + /// A winner has been chosen! + Winner(AccountId, Balance), + /// A ticket has been bought! + TicketBought(AccountId, CallIndex), + } +); + +decl_error! { + pub enum Error for Module { + /// An overflow has occurred. + Overflow, + /// A lottery has not been configured. + NotConfigured, + /// A lottery is already in progress. + InProgress, + /// A lottery has already ended. + AlreadyEnded, + /// The call is not valid for an open lottery. + InvalidCall, + /// You are already participating in the lottery with this call. + AlreadyParticipating, + /// Too many calls for a single lottery. + TooManyCalls, + /// Failed to encode calls + EncodingFailed, + } +} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin, system = frame_system { + const ModuleId: ModuleId = T::ModuleId::get(); + const MaxCalls: u32 = T::MaxCalls::get() as u32; + + fn deposit_event() = default; + + /// Buy a ticket to enter the lottery. + /// + /// This extrinsic acts as a passthrough function for `call`. In all + /// situations where `call` alone would succeed, this extrinsic should + /// succeed. + /// + /// If `call` is successful, then we will attempt to purchase a ticket, + /// which may fail silently. To detect success of a ticket purchase, you + /// should listen for the `TicketBought` event. + /// + /// This extrinsic must be called by a signed origin. 
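The `CallIndex` comment above leans on a general property of SCALE: an outer call enum encodes its variant (the pallet) as the first byte, and the nested call enum encodes its variant (the call) as the second. A standalone illustration with stand-in enums, not the real runtime call types:

```rust
// Requires parity-scale-codec (imported as `codec`) with the "derive" feature.
use codec::Encode;

#[derive(Encode)]
enum BalancesCall {
    Transfer(u64, u64),           // call index 0 within the pallet
    ForceTransfer(u64, u64, u64), // call index 1
}

#[derive(Encode)]
enum OuterCall {
    System(Vec<u8>),        // pallet index 0
    Balances(BalancesCall), // pallet index 1
}

fn main() {
    let encoded = OuterCall::Balances(BalancesCall::ForceTransfer(1, 2, 3)).encode();
    // The first two bytes identify the call, which is what `call_to_index` extracts.
    assert_eq!((encoded[0], encoded[1]), (1, 1));
}
```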
+ #[weight = + T::WeightInfo::buy_ticket() + .saturating_add(call.get_dispatch_info().weight) + ] + fn buy_ticket(origin, call: Box<::Call>) { + let caller = ensure_signed(origin.clone())?; + call.clone().dispatch(origin).map_err(|e| e.error)?; + + let _ = Self::do_buy_ticket(&caller, &call); + } + + /// Set calls in storage which can be used to purchase a lottery ticket. + /// + /// This function only matters if you use the `ValidateCall` implementation + /// provided by this pallet, which uses storage to determine the valid calls. + /// + /// This extrinsic must be called by the Manager origin. + #[weight = T::WeightInfo::set_calls(calls.len() as u32)] + fn set_calls(origin, calls: Vec<::Call>) { + T::ManagerOrigin::ensure_origin(origin)?; + ensure!(calls.len() <= T::MaxCalls::get(), Error::::TooManyCalls); + if calls.is_empty() { + CallIndices::kill(); + } else { + let indices = Self::calls_to_indices(&calls)?; + CallIndices::put(indices); + } + Self::deposit_event(RawEvent::CallsUpdated); + } + + /// Start a lottery using the provided configuration. + /// + /// This extrinsic must be called by the `ManagerOrigin`. + /// + /// Parameters: + /// + /// * `price`: The cost of a single ticket. + /// * `length`: How long the lottery should run for starting at the current block. + /// * `delay`: How long after the lottery end we should wait before picking a winner. + /// * `repeat`: If the lottery should repeat when completed. + #[weight = T::WeightInfo::start_lottery()] + fn start_lottery(origin, + price: BalanceOf, + length: T::BlockNumber, + delay: T::BlockNumber, + repeat: bool, + ) { + T::ManagerOrigin::ensure_origin(origin)?; + Lottery::::try_mutate(|lottery| -> DispatchResult { + ensure!(lottery.is_none(), Error::::InProgress); + let index = LotteryIndex::get(); + let new_index = index.checked_add(1).ok_or(Error::::Overflow)?; + let start = frame_system::Module::::block_number(); + // Use new_index to more easily track everything with the current state. + *lottery = Some(LotteryConfig { + price, + start, + length, + delay, + repeat, + }); + LotteryIndex::put(new_index); + Ok(()) + })?; + // Make sure pot exists. + let lottery_account = Self::account_id(); + if T::Currency::total_balance(&lottery_account).is_zero() { + T::Currency::deposit_creating(&lottery_account, T::Currency::minimum_balance()); + } + Self::deposit_event(RawEvent::LotteryStarted); + } + + /// If a lottery is repeating, you can use this to stop the repeat. + /// The lottery will continue to run to completion. + /// + /// This extrinsic must be called by the `ManagerOrigin`. + #[weight = T::WeightInfo::stop_repeat()] + fn stop_repeat(origin) { + T::ManagerOrigin::ensure_origin(origin)?; + Lottery::::mutate(|mut lottery| { + if let Some(config) = &mut lottery { + config.repeat = false + } + }); + } + + fn on_initialize(n: T::BlockNumber) -> Weight { + Lottery::::mutate(|mut lottery| -> Weight { + if let Some(config) = &mut lottery { + let payout_block = config.start + .saturating_add(config.length) + .saturating_add(config.delay); + if payout_block <= n { + let (lottery_account, lottery_balance) = Self::pot(); + let ticket_count = TicketsCount::get(); + + let winning_number = Self::choose_winner(ticket_count); + let winner = Tickets::::get(winning_number).unwrap_or(lottery_account); + // Not much we can do if this fails... 
+ let _ = T::Currency::transfer(&Self::account_id(), &winner, lottery_balance, KeepAlive); + + Self::deposit_event(RawEvent::Winner(winner, lottery_balance)); + + TicketsCount::kill(); + + if config.repeat { + // If lottery should repeat, increment index by 1. + LotteryIndex::mutate(|index| *index = index.saturating_add(1)); + // Set a new start with the current block. + config.start = n; + return T::WeightInfo::on_initialize_repeat() + } else { + // Else, kill the lottery storage. + *lottery = None; + return T::WeightInfo::on_initialize_end() + } + // We choose not need to kill Participants and Tickets to avoid a large number + // of writes at one time. Instead, data persists between lotteries, but is not used + // if it is not relevant. + } + } + return T::DbWeight::get().reads(1) + }) + } + } +} + +impl Module { + /// The account ID of the lottery pot. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn account_id() -> T::AccountId { + T::ModuleId::get().into_account() + } + + /// Return the pot account and amount of money in the pot. + // The existential deposit is not part of the pot so lottery account never gets deleted. + fn pot() -> (T::AccountId, BalanceOf) { + let account_id = Self::account_id(); + let balance = T::Currency::free_balance(&account_id) + .saturating_sub(T::Currency::minimum_balance()); + + (account_id, balance) + } + + // Converts a vector of calls into a vector of call indices. + fn calls_to_indices(calls: &[::Call]) -> Result, DispatchError> { + let mut indices = Vec::with_capacity(calls.len()); + for c in calls.iter() { + let index = Self::call_to_index(c)?; + indices.push(index) + } + Ok(indices) + } + + // Convert a call to it's call index by encoding the call and taking the first two bytes. + fn call_to_index(call: &::Call) -> Result { + let encoded_call = call.encode(); + if encoded_call.len() < 2 { Err(Error::::EncodingFailed)? } + return Ok((encoded_call[0], encoded_call[1])) + } + + // Logic for buying a ticket. + fn do_buy_ticket(caller: &T::AccountId, call: &::Call) -> DispatchResult { + // Check the call is valid lottery + let config = Lottery::::get().ok_or(Error::::NotConfigured)?; + let block_number = frame_system::Module::::block_number(); + ensure!(block_number < config.start.saturating_add(config.length), Error::::AlreadyEnded); + ensure!(T::ValidateCall::validate_call(call), Error::::InvalidCall); + let call_index = Self::call_to_index(call)?; + let ticket_count = TicketsCount::get(); + let new_ticket_count = ticket_count.checked_add(1).ok_or(Error::::Overflow)?; + // Try to update the participant status + Participants::::try_mutate(&caller, |(lottery_index, participating_calls)| -> DispatchResult { + let index = LotteryIndex::get(); + // If lottery index doesn't match, then reset participating calls and index. + if *lottery_index != index { + *participating_calls = Vec::new(); + *lottery_index = index; + } else { + // Check that user is not already participating under this call. + ensure!(!participating_calls.iter().any(|c| call_index == *c), Error::::AlreadyParticipating); + } + // Check user has enough funds and send it to the Lottery account. + T::Currency::transfer(caller, &Self::account_id(), config.price, KeepAlive)?; + // Create a new ticket. 
+ TicketsCount::put(new_ticket_count); + Tickets::::insert(ticket_count, caller.clone()); + participating_calls.push(call_index); + Ok(()) + })?; + + Self::deposit_event(RawEvent::TicketBought(caller.clone(), call_index)); + + Ok(()) + } + + // Randomly choose a winner from among the total number of participants. + fn choose_winner(total: u32) -> u32 { + let mut random_number = Self::generate_random_number(0); + + // Best effort attempt to remove bias from modulus operator. + for i in 1 .. T::MaxGenerateRandom::get() { + if random_number < u32::MAX - u32::MAX % total { + break; + } + + random_number = Self::generate_random_number(i); + } + + random_number % total + } + + // Generate a random number from a given seed. + // Note that there is potential bias introduced by using modulus operator. + // You should call this function with different seed values until the random + // number lies within `u32::MAX - u32::MAX % n`. + fn generate_random_number(seed: u32) -> u32 { + let random_seed = T::Randomness::random(&(T::ModuleId::get(), seed).encode()); + let random_number = ::decode(&mut random_seed.as_ref()) + .expect("secure hashes should always be bigger than u32; qed"); + random_number + } +} diff --git a/frame/lottery/src/mock.rs b/frame/lottery/src/mock.rs new file mode 100644 index 0000000000000..ea73ee190e6dd --- /dev/null +++ b/frame/lottery/src/mock.rs @@ -0,0 +1,137 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test utilities + +use super::*; +use crate as pallet_lottery; + +use frame_support::{ + parameter_types, + traits::{OnInitialize, OnFinalize, TestRandomness}, +}; +use sp_core::H256; +use sp_runtime::{ + Perbill, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; +use frame_system::EnsureRoot; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Lottery: pallet_lottery::{Module, Call, Storage, Event}, + } +); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: u32 = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} + +impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); +} + +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type Balance = u64; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +parameter_types! { + pub const LotteryModuleId: ModuleId = ModuleId(*b"py/lotto"); + pub const MaxCalls: usize = 2; + pub const MaxGenerateRandom: u32 = 10; +} + +impl Config for Test { + type ModuleId = LotteryModuleId; + type Call = Call; + type Currency = Balances; + type Randomness = TestRandomness; + type Event = Event; + type ManagerOrigin = EnsureRoot; + type MaxCalls = MaxCalls; + type ValidateCall = Lottery; + type MaxGenerateRandom = MaxGenerateRandom; + type WeightInfo = (); +} + +pub type SystemCall = frame_system::Call; +pub type BalancesCall = pallet_balances::Call; + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)], + }.assimilate_storage(&mut t).unwrap(); + t.into() +} + +/// Run until a particular block. +pub fn run_to_block(n: u64) { + while System::block_number() < n { + if System::block_number() > 1 { + Lottery::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + } + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + Lottery::on_initialize(System::block_number()); + } +} diff --git a/frame/lottery/src/tests.rs b/frame/lottery/src/tests.rs new file mode 100644 index 0000000000000..03c542d5000d1 --- /dev/null +++ b/frame/lottery/src/tests.rs @@ -0,0 +1,261 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for the module. 
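Before the tests, a note on the winner selection implemented in `lib.rs` above: `choose_winner` redraws up to `MaxGenerateRandom` times so that values outside the largest multiple of the ticket count are rejected rather than folded in by the modulus. A self-contained illustration of that rejection rule, with a hypothetical fixed sequence standing in for the on-chain randomness:

```rust
// Mirrors the pallet's test `random < u32::MAX - u32::MAX % total`;
// `total` is assumed non-zero, as it is once tickets have been sold.
fn choose(total: u32, draws: &[u32]) -> u32 {
    let mut random = draws[0];
    for &next in &draws[1..] {
        if random < u32::MAX - u32::MAX % total {
            break; // already inside the unbiased range
        }
        random = next; // reject the draw and try the next one
    }
    random % total
}

fn main() {
    // For total = 3 only u32::MAX itself lies outside the unbiased range,
    // so the first draw is rejected and the second draw picks ticket 1.
    assert_eq!(choose(3, &[u32::MAX, 7]), 1);
}
```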
+ +use super::*; +use mock::{ + Lottery, Balances, Test, Origin, Call, SystemCall, BalancesCall, + new_test_ext, run_to_block +}; +use sp_runtime::traits::{BadOrigin}; +use frame_support::{assert_noop, assert_ok}; +use pallet_balances::Error as BalancesError; + +#[test] +fn initial_state() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(Lottery::account_id()), 0); + assert!(crate::Lottery::::get().is_none()); + assert_eq!(Participants::::get(&1), (0, vec![])); + assert_eq!(TicketsCount::get(), 0); + assert!(Tickets::::get(0).is_none()); + }); +} + +#[test] +fn basic_end_to_end_works() { + new_test_ext().execute_with(|| { + let price = 10; + let length = 20; + let delay = 5; + let calls = vec![ + Call::Balances(BalancesCall::force_transfer(0, 0, 0)), + Call::Balances(BalancesCall::transfer(0, 0)), + ]; + + // Set calls for the lottery + assert_ok!(Lottery::set_calls(Origin::root(), calls)); + + // Start lottery, it repeats + assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, true)); + assert!(crate::Lottery::::get().is_some()); + + assert_eq!(Balances::free_balance(&1), 100); + let call = Box::new(Call::Balances(BalancesCall::transfer(2, 20))); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + // 20 from the transfer, 10 from buying a ticket + assert_eq!(Balances::free_balance(&1), 100 - 20 - 10); + assert_eq!(Participants::::get(&1).1.len(), 1); + assert_eq!(TicketsCount::get(), 1); + // 1 owns the 0 ticket + assert_eq!(Tickets::::get(0), Some(1)); + + // More ticket purchases + assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(3), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(4), call.clone())); + assert_eq!(TicketsCount::get(), 4); + + // Go to end + run_to_block(20); + assert_ok!(Lottery::buy_ticket(Origin::signed(5), call.clone())); + // Ticket isn't bought + assert_eq!(TicketsCount::get(), 4); + + // Go to payout + run_to_block(25); + // User 1 wins + assert_eq!(Balances::free_balance(&1), 70 + 40); + // Lottery is reset and restarted + assert_eq!(TicketsCount::get(), 0); + assert_eq!(LotteryIndex::get(), 2); + assert_eq!( + crate::Lottery::::get().unwrap(), + LotteryConfig { + price, + start: 25, + length, + delay, + repeat: true, + } + ); + }); +} + +#[test] +fn set_calls_works() { + new_test_ext().execute_with(|| { + assert!(!CallIndices::exists()); + + let calls = vec![ + Call::Balances(BalancesCall::force_transfer(0, 0, 0)), + Call::Balances(BalancesCall::transfer(0, 0)), + ]; + + assert_ok!(Lottery::set_calls(Origin::root(), calls)); + assert!(CallIndices::exists()); + + let too_many_calls = vec![ + Call::Balances(BalancesCall::force_transfer(0, 0, 0)), + Call::Balances(BalancesCall::transfer(0, 0)), + Call::System(SystemCall::remark(vec![])), + ]; + + assert_noop!( + Lottery::set_calls(Origin::root(), too_many_calls), + Error::::TooManyCalls, + ); + + // Clear calls + assert_ok!(Lottery::set_calls(Origin::root(), vec![])); + assert!(CallIndices::get().is_empty()); + }); +} + +#[test] +fn start_lottery_works() { + new_test_ext().execute_with(|| { + let price = 10; + let length = 20; + let delay = 5; + + // Setup ignores bad origin + assert_noop!( + Lottery::start_lottery(Origin::signed(1), price, length, delay, false), + BadOrigin, + ); + + // All good + assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, false)); + + // Can't open another one if lottery is already present + assert_noop!( + 
Lottery::start_lottery(Origin::root(), price, length, delay, false), + Error::::InProgress, + ); + }); +} + +#[test] +fn buy_ticket_works_as_simple_passthrough() { + // This test checks that even if the user could not buy a ticket, that `buy_ticket` acts + // as a simple passthrough to the real call. + new_test_ext().execute_with(|| { + // No lottery set up + let call = Box::new(Call::Balances(BalancesCall::transfer(2, 20))); + // This is just a basic transfer then + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_eq!(Balances::free_balance(&1), 100 - 20); + assert_eq!(TicketsCount::get(), 0); + + // Lottery is set up, but too expensive to enter, so `do_buy_ticket` fails. + let calls = vec![ + Call::Balances(BalancesCall::force_transfer(0, 0, 0)), + Call::Balances(BalancesCall::transfer(0, 0)), + ]; + assert_ok!(Lottery::set_calls(Origin::root(), calls)); + + // Ticket price of 60 would kill the user's account + assert_ok!(Lottery::start_lottery(Origin::root(), 60, 10, 5, false)); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_eq!(Balances::free_balance(&1), 100 - 20 - 20); + assert_eq!(TicketsCount::get(), 0); + + // If call would fail, the whole thing still fails the same + let fail_call = Box::new(Call::Balances(BalancesCall::transfer(2, 1000))); + assert_noop!( + Lottery::buy_ticket(Origin::signed(1), fail_call), + BalancesError::::InsufficientBalance, + ); + + let bad_origin_call = Box::new(Call::Balances(BalancesCall::force_transfer(0, 0, 0))); + assert_noop!( + Lottery::buy_ticket(Origin::signed(1), bad_origin_call), + BadOrigin, + ); + + // User can call other txs, but doesn't get a ticket + let remark_call = Box::new(Call::System(SystemCall::remark(b"hello, world!".to_vec()))); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), remark_call)); + assert_eq!(TicketsCount::get(), 0); + + let successful_call = Box::new(Call::Balances(BalancesCall::transfer(2, 1))); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), successful_call)); + assert_eq!(TicketsCount::get(), 1); + }); +} + +#[test] +fn buy_ticket_works() { + new_test_ext().execute_with(|| { + // Set calls for the lottery. 
+ let calls = vec![ + Call::System(SystemCall::remark(vec![])), + Call::Balances(BalancesCall::transfer(0, 0)), + ]; + assert_ok!(Lottery::set_calls(Origin::root(), calls)); + + + // Can't buy ticket before start + let call = Box::new(Call::Balances(BalancesCall::transfer(2, 1))); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_eq!(TicketsCount::get(), 0); + + // Start lottery + assert_ok!(Lottery::start_lottery(Origin::root(), 1, 20, 5, false)); + + // Go to start, buy ticket for transfer + run_to_block(5); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call)); + assert_eq!(TicketsCount::get(), 1); + + // Can't buy another of the same ticket (even if call is slightly changed) + let call = Box::new(Call::Balances(BalancesCall::transfer(3, 30))); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call)); + assert_eq!(TicketsCount::get(), 1); + + // Buy ticket for remark + let call = Box::new(Call::System(SystemCall::remark(b"hello, world!".to_vec()))); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_eq!(TicketsCount::get(), 2); + + // Go to end, can't buy tickets anymore + run_to_block(20); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); + assert_eq!(TicketsCount::get(), 2); + + // Go to payout, can't buy tickets when there is no lottery open + run_to_block(25); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); + assert_eq!(TicketsCount::get(), 0); + assert_eq!(LotteryIndex::get(), 1); + }); +} + +#[test] +fn start_lottery_will_create_account() { + new_test_ext().execute_with(|| { + let price = 10; + let length = 20; + let delay = 5; + + assert_eq!(Balances::total_balance(&Lottery::account_id()), 0); + assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, false)); + assert_eq!(Balances::total_balance(&Lottery::account_id()), 1); + }); +} diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs new file mode 100644 index 0000000000000..28d5ac0945b1d --- /dev/null +++ b/frame/lottery/src/weights.rs @@ -0,0 +1,124 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_lottery +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 +//! DATE: 2021-01-05, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_lottery +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/lottery/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_lottery. +pub trait WeightInfo { + fn buy_ticket() -> Weight; + fn set_calls(n: u32, ) -> Weight; + fn start_lottery() -> Weight; + fn stop_repeat() -> Weight; + fn on_initialize_end() -> Weight; + fn on_initialize_repeat() -> Weight; +} + +/// Weights for pallet_lottery using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn buy_ticket() -> Weight { + (97_799_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn set_calls(n: u32, ) -> Weight { + (20_932_000 as Weight) + // Standard Error: 9_000 + .saturating_add((513_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn start_lottery() -> Weight { + (77_600_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn stop_repeat() -> Weight { + (10_707_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn on_initialize_end() -> Weight { + (162_126_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn on_initialize_repeat() -> Weight { + (169_310_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn buy_ticket() -> Weight { + (97_799_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn set_calls(n: u32, ) -> Weight { + (20_932_000 as Weight) + // Standard Error: 9_000 + .saturating_add((513_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn start_lottery() -> Weight { + (77_600_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn stop_repeat() -> Weight { + (10_707_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn on_initialize_end() -> Weight { + (162_126_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn on_initialize_repeat() -> Weight { + (169_310_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } +} diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index 1cac5d38c5f18..98987e6fe901b 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -1,6 +1,6 @@ 
[package] name = "pallet-membership" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 492fda88dd170..f080938095445 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,9 +30,9 @@ use frame_support::{ }; use frame_system::ensure_signed; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Required origin for adding a member (though can always be Root). type AddOrigin: EnsureOrigin; @@ -59,7 +59,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Membership { + trait Store for Module, I: Instance=DefaultInstance> as Membership { /// The current membership, stored as an ordered Vec. Members get(fn members): Vec; @@ -80,8 +80,8 @@ decl_storage! { decl_event!( pub enum Event where - ::AccountId, - >::Event, + ::AccountId, + >::Event, { /// The given member was added; see the transaction for who. MemberAdded, @@ -100,7 +100,7 @@ decl_event!( decl_error! { /// Error for the nicks module. - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// Already a member. AlreadyMember, /// Not a member. @@ -109,7 +109,7 @@ decl_error! { } decl_module! { - pub struct Module, I: Instance=DefaultInstance> + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { @@ -253,7 +253,7 @@ decl_module! 
{ } } -impl, I: Instance> Module { +impl, I: Instance> Module { fn rejig_prime(members: &[T::AccountId]) { if let Some(prime) = Prime::::get() { match members.binary_search(&prime) { @@ -264,7 +264,7 @@ impl, I: Instance> Module { } } -impl, I: Instance> Contains for Module { +impl, I: Instance> Contains for Module { fn sorted_members() -> Vec { Self::members() } @@ -277,54 +277,57 @@ impl, I: Instance> Contains for Module { #[cfg(test)] mod tests { use super::*; + use crate as pallet_membership; - use std::cell::RefCell; - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - ord_parameter_types - }; + use frame_support::{assert_ok, assert_noop, parameter_types, ord_parameter_types}; use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; + use sp_runtime::{traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; use frame_system::EnsureSignedBy; - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Membership: pallet_membership::{Module, Call, Storage, Config, Event}, + } + ); - #[derive(Clone, Eq, PartialEq)] - pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); + pub static Members: Vec = vec![]; + pub static Prime: Option = None; } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } ord_parameter_types! { pub const One: u64 = 1; @@ -334,11 +337,6 @@ mod tests { pub const Five: u64 = 5; } - thread_local! 
{ - static MEMBERS: RefCell> = RefCell::new(vec![]); - static PRIME: RefCell> = RefCell::new(None); - } - pub struct TestChangeMembers; impl ChangeMembers for TestChangeMembers { fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { @@ -363,8 +361,8 @@ mod tests { } } - impl Trait for Test { - type Event = (); + impl Config for Test { + type Event = Event; type AddOrigin = EnsureSignedBy; type RemoveOrigin = EnsureSignedBy; type SwapOrigin = EnsureSignedBy; @@ -374,12 +372,10 @@ mod tests { type MembershipChanged = TestChangeMembers; } - type Membership = Module; - fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); // We use default for brevity, but you can configure as desired if needed. - GenesisConfig::{ + pallet_membership::GenesisConfig::{ members: vec![10, 20, 30], .. Default::default() }.assimilate_storage(&mut t).unwrap(); diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml new file mode 100644 index 0000000000000..eea3845ae16d3 --- /dev/null +++ b/frame/merkle-mountain-range/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "pallet-mmr" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME Merkle Mountain Range pallet." + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +mmr-lib = { package = "ckb-merkle-mountain-range", default-features = false, version = "0.3.1" } +pallet-mmr-primitives = { version = "3.0.0", default-features = false, path = "./primitives" } +serde = { version = "1.0.101", optional = true } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } + +[dev-dependencies] +env_logger = "0.8" +hex-literal = "0.3" + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "mmr-lib/std", + "pallet-mmr-primitives/std", + "serde", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = ["frame-benchmarking"] diff --git a/frame/merkle-mountain-range/primitives/Cargo.toml b/frame/merkle-mountain-range/primitives/Cargo.toml new file mode 100644 index 0000000000000..be0a8bdc3a2bb --- /dev/null +++ b/frame/merkle-mountain-range/primitives/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "pallet-mmr-primitives" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME Merkle Mountain Range primitives." 
+ +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-support = { version = "3.0.0", default-features = false, path = "../../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../../system" } +serde = { version = "1.0.101", optional = true, features = ["derive"] } +sp-api = { version = "3.0.0", default-features = false, path = "../../../primitives/api" } +sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } + +[dev-dependencies] +hex-literal = "0.3" + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "serde", + "sp-api/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/frame/merkle-mountain-range/primitives/src/lib.rs b/frame/merkle-mountain-range/primitives/src/lib.rs new file mode 100644 index 0000000000000..d57f8565b6081 --- /dev/null +++ b/frame/merkle-mountain-range/primitives/src/lib.rs @@ -0,0 +1,557 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Merkle Mountain Range primitive types. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +use frame_support::{RuntimeDebug, debug}; +use sp_runtime::traits::{self, Saturating, One}; +use sp_std::fmt; +#[cfg(not(feature = "std"))] +use sp_std::prelude::Vec; + +/// A provider of the MMR's leaf data. +pub trait LeafDataProvider { + /// A type that should end up in the leaf of MMR. + type LeafData: FullLeaf + codec::Decode; + + /// The method to return leaf data that should be placed + /// in the leaf node appended MMR at this block. + /// + /// This is being called by the `on_initialize` method of + /// this pallet at the very beginning of each block. + fn leaf_data() -> Self::LeafData; +} + +impl LeafDataProvider for () { + type LeafData = (); + + fn leaf_data() -> Self::LeafData { + () + } +} + +/// The most common use case for MMRs is to store historical block hashes, +/// so that any point in time in the future we can receive a proof about some past +/// blocks without using excessive on-chain storage. +/// +/// Hence we implement the [LeafDataProvider] for [frame_system::Module]. Since the +/// current block hash is not available (since the block is not finished yet), +/// we use the `parent_hash` here along with parent block number. +impl LeafDataProvider for frame_system::Module { + type LeafData = ( + ::BlockNumber, + ::Hash + ); + + fn leaf_data() -> Self::LeafData { + ( + Self::block_number().saturating_sub(One::one()), + Self::parent_hash() + ) + } +} + +/// New MMR root notification hook. 
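The `LeafDataProvider` documentation above establishes that whatever `leaf_data()` returns while a block is initialized becomes the leaf appended for that block. A minimal sketch of a custom provider; the pallet and its `LatestStateRoot` storage item are hypothetical, only the trait shape comes from this file:

```rust
/// Hypothetical pallet exposing one of its storage items as the MMR leaf.
/// Any type that is SCALE-codec, Clone, PartialEq and Debug gets the blanket
/// `FullLeaf` implementation, so the system `Hash` type qualifies.
impl<T: Config> LeafDataProvider for Module<T> {
    type LeafData = <T as frame_system::Config>::Hash;

    fn leaf_data() -> Self::LeafData {
        // Read whatever should be committed into the MMR for the current block.
        LatestStateRoot::<T>::get()
    }
}
```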
+pub trait OnNewRoot { + /// Function called by the pallet in case new MMR root has been computed. + fn on_new_root(root: &Hash); +} + +/// No-op implementation of [OnNewRoot]. +impl OnNewRoot for () { + fn on_new_root(_root: &Hash) {} +} + +/// A full leaf content stored in the offchain-db. +pub trait FullLeaf: Clone + PartialEq + fmt::Debug { + /// Encode the leaf either in it's full or compact form. + /// + /// NOTE the encoding returned here MUST be `Decode`able into `FullLeaf`. + fn using_encoded R>(&self, f: F, compact: bool) -> R; +} + +impl FullLeaf for T { + fn using_encoded R>(&self, f: F, _compact: bool) -> R { + codec::Encode::using_encoded(self, f) + } +} + +/// An element representing either full data or it's hash. +/// +/// See [Compact] to see how it may be used in practice to reduce the size +/// of proofs in case multiple [LeafDataProvider]s are composed together. +/// This is also used internally by the MMR to differentiate leaf nodes (data) +/// and inner nodes (hashes). +/// +/// [DataOrHash::hash] method calculates the hash of this element in it's compact form, +/// so should be used instead of hashing the encoded form (which will always be non-compact). +#[derive(RuntimeDebug, Clone, PartialEq)] +pub enum DataOrHash { + /// Arbitrary data in it's full form. + Data(L), + /// A hash of some data. + Hash(H::Output), +} + +impl From for DataOrHash { + fn from(l: L) -> Self { + Self::Data(l) + } +} + +mod encoding { + use super::*; + + /// A helper type to implement [codec::Codec] for [DataOrHash]. + #[derive(codec::Encode, codec::Decode)] + enum Either { + Left(A), + Right(B), + } + + impl codec::Encode for DataOrHash { + fn encode_to(&self, dest: &mut T) { + match self { + Self::Data(l) => l.using_encoded( + |data| Either::<&[u8], &H::Output>::Left(data).encode_to(dest), false + ), + Self::Hash(h) => Either::<&[u8], &H::Output>::Right(h).encode_to(dest), + } + } + } + + impl codec::Decode for DataOrHash { + fn decode(value: &mut I) -> Result { + let decoded: Either, H::Output> = Either::decode(value)?; + Ok(match decoded { + Either::Left(l) => DataOrHash::Data(L::decode(&mut &*l)?), + Either::Right(r) => DataOrHash::Hash(r), + }) + } + } +} + +impl DataOrHash { + /// Retrieve a hash of this item. + /// + /// Depending on the node type it's going to either be a contained value for [DataOrHash::Hash] + /// node, or a hash of SCALE-encoded [DataOrHash::Data] data. + pub fn hash(&self) -> H::Output { + match *self { + Self::Data(ref leaf) => leaf.using_encoded(::hash, true), + Self::Hash(ref hash) => hash.clone(), + } + } +} + +/// A composition of multiple leaf elements with compact form representation. +/// +/// When composing together multiple [LeafDataProvider]s you will end up with +/// a tuple of `LeafData` that each element provides. +/// +/// However this will cause the leaves to have significant size, while for some +/// use cases it will be enough to prove only one element of the tuple. +/// That's the rationale for [Compact] struct. We wrap each element of the tuple +/// into [DataOrHash] and each tuple element is hashed first before constructing +/// the final hash of the entire tuple. This allows you to replace tuple elements +/// you don't care about with their hashes. +#[derive(RuntimeDebug, Clone, PartialEq)] +pub struct Compact { + /// Internal tuple representation. 
+ pub tuple: T, + _hash: sp_std::marker::PhantomData, +} + +impl sp_std::ops::Deref for Compact { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.tuple + } +} + +impl Compact { + /// Create a new [Compact] wrapper for a tuple. + pub fn new(tuple: T) -> Self { + Self { tuple, _hash: Default::default() } + } +} + +impl codec::Decode for Compact { + fn decode(value: &mut I) -> Result { + T::decode(value).map(Compact::new) + } +} + +macro_rules! impl_leaf_data_for_tuple { + ( $( $name:ident : $id:tt ),+ ) => { + /// [FullLeaf] implementation for `Compact, ...)>` + impl FullLeaf for Compact, )+ )> where + H: traits::Hash, + $( $name: FullLeaf ),+ + { + fn using_encoded R>(&self, f: F, compact: bool) -> R { + if compact { + codec::Encode::using_encoded(&( + $( DataOrHash::::Hash(self.tuple.$id.hash()), )+ + ), f) + } else { + codec::Encode::using_encoded(&self.tuple, f) + } + } + } + + /// [LeafDataProvider] implementation for `Compact, ...)>` + /// + /// This provides a compact-form encoding for tuples wrapped in [Compact]. + impl LeafDataProvider for Compact where + H: traits::Hash, + $( $name: LeafDataProvider ),+ + { + type LeafData = Compact< + H, + ( $( DataOrHash, )+ ), + >; + + fn leaf_data() -> Self::LeafData { + let tuple = ( + $( DataOrHash::Data($name::leaf_data()), )+ + ); + Compact::new(tuple) + } + } + + /// [LeafDataProvider] implementation for `(Tuple, ...)` + /// + /// This provides regular (non-compactable) composition of [LeafDataProvider]s. + impl<$( $name ),+> LeafDataProvider for ( $( $name, )+ ) where + ( $( $name::LeafData, )+ ): FullLeaf, + $( $name: LeafDataProvider ),+ + { + type LeafData = ( $( $name::LeafData, )+ ); + + fn leaf_data() -> Self::LeafData { + ( + $( $name::leaf_data(), )+ + ) + } + } + } +} + +/// Test functions implementation for `Compact, ...)>` +#[cfg(test)] +impl Compact, DataOrHash)> where + H: traits::Hash, + A: FullLeaf, + B: FullLeaf, +{ + /// Retrieve a hash of this item in it's compact form. + pub fn hash(&self) -> H::Output { + self.using_encoded(::hash, true) + } +} + +impl_leaf_data_for_tuple!(A:0); +impl_leaf_data_for_tuple!(A:0, B:1); +impl_leaf_data_for_tuple!(A:0, B:1, C:2); +impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3); +impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3, E:4); + +/// A MMR proof data for one of the leaves. +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq)] +pub struct Proof { + /// The index of the leaf the proof is for. + pub leaf_index: u64, + /// Number of leaves in MMR, when the proof was generated. + pub leaf_count: u64, + /// Proof elements (hashes of siblings of inner nodes on the path to the leaf). + pub items: Vec, +} + +/// Merkle Mountain Range operation error. +#[derive(RuntimeDebug, codec::Encode, codec::Decode, PartialEq, Eq)] +pub enum Error { + /// Error while pushing new node. + Push, + /// Error getting the new root. + GetRoot, + /// Error commiting changes. + Commit, + /// Error during proof generation. + GenerateProof, + /// Proof verification error. + Verify, + /// Leaf not found in the storage. + LeafNotFound, +} + +impl Error { + #![allow(unused_variables)] + /// Consume given error `e` with `self` and generate a native log entry with error details. + pub fn log_error(self, e: impl fmt::Debug) -> Self { + debug::native::error!("[{:?}] MMR error: {:?}", self, e); + self + } + + /// Consume given error `e` with `self` and generate a native log entry with error details. 
+ pub fn log_debug(self, e: impl fmt::Debug) -> Self { + debug::native::debug!("[{:?}] MMR error: {:?}", self, e); + self + } +} + +/// A helper type to allow using arbitrary SCALE-encoded leaf data in the RuntimeApi. +/// +/// The point is to be able to verify MMR proofs from external MMRs, where we don't +/// know the exact leaf type, but it's enough for us to have it SCALE-encoded. +/// +/// Note the leaf type should be encoded in its compact form when passed through this type. +/// See [FullLeaf] documentation for details. +/// +/// This type does not implement SCALE encoding/decoding on purpose to avoid confusion, +/// it would have to be SCALE-compatible with the concrete leaf type, but due to SCALE limitations +/// it's not possible to know how many bytes the encoding of concrete leaf type uses. +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[derive(RuntimeDebug, Clone, PartialEq)] +pub struct OpaqueLeaf( + /// Raw bytes of the leaf type encoded in its compact form. + /// + /// NOTE it DOES NOT include length prefix (like `Vec` encoding would). + #[cfg_attr(feature = "std", serde(with = "sp_core::bytes"))] + pub Vec +); + +impl OpaqueLeaf { + /// Convert a concrete MMR leaf into an opaque type. + pub fn from_leaf(leaf: &T) -> Self { + let encoded_leaf = leaf.using_encoded(|d| d.to_vec(), true); + OpaqueLeaf::from_encoded_leaf(encoded_leaf) + } + + /// Create a `OpaqueLeaf` given raw bytes of compact-encoded leaf. + pub fn from_encoded_leaf(encoded_leaf: Vec) -> Self { + OpaqueLeaf(encoded_leaf) + } +} + +impl FullLeaf for OpaqueLeaf { + fn using_encoded R>(&self, f: F, _compact: bool) -> R { + f(&self.0) + } +} + +sp_api::decl_runtime_apis! { + /// API to interact with MMR pallet. + pub trait MmrApi { + /// Generate MMR proof for a leaf under given index. + fn generate_proof(leaf_index: u64) -> Result<(Leaf, Proof), Error>; + + /// Verify MMR proof against on-chain MMR. + /// + /// Note this function will use on-chain MMR root hash and check if the proof + /// matches the hash. + /// See [Self::verify_proof_stateless] for a stateless verifier. + fn verify_proof(leaf: Leaf, proof: Proof) -> Result<(), Error>; + + /// Verify MMR proof against given root hash. + /// + /// Note this function does not require any on-chain storage - the + /// proof is verified against given MMR root hash. + /// + /// The leaf data is expected to be encoded in it's compact form. 
+ fn verify_proof_stateless(root: Hash, leaf: Vec, proof: Proof) + -> Result<(), Error>; + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use codec::Decode; + use sp_core::H256; + use sp_runtime::traits::Keccak256; + + pub(crate) fn hex(s: &str) -> H256 { + s.parse().unwrap() + } + + type Test = DataOrHash; + type TestCompact = Compact; + type TestProof = Proof<::Output>; + + #[test] + fn should_encode_decode_proof() { + // given + let proof: TestProof = Proof { + leaf_index: 5, + leaf_count: 10, + items: vec![ + hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd"), + hex("d3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd"), + hex("e3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd"), + ], + }; + + // when + let encoded = codec::Encode::encode(&proof); + let decoded = TestProof::decode(&mut &*encoded); + + // then + assert_eq!(decoded, Ok(proof)); + } + + #[test] + fn should_encode_decode_correctly_if_no_compact() { + // given + let cases = vec![ + Test::Data("Hello World!".into()), + Test::Hash(hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd")), + Test::Data("".into()), + Test::Data("3e48d6bcd417fb22e044747242451e2c0f3e602d1bcad2767c34808621956417".into()), + ]; + + // when + let encoded = cases + .iter() + .map(codec::Encode::encode) + .collect::>(); + + let decoded = encoded + .iter() + .map(|x| Test::decode(&mut &**x)) + .collect::>(); + + // then + assert_eq!(decoded, cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>()); + // check encoding correctness + assert_eq!(&encoded[0], &hex_literal::hex!("00343048656c6c6f20576f726c6421")); + assert_eq!( + encoded[1].as_slice(), + hex_literal::hex!( + "01c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd" + ).as_ref() + ); + } + + #[test] + fn should_return_the_hash_correctly() { + // given + let a = Test::Data("Hello World!".into()); + let b = Test::Hash(hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd")); + + // when + let a = a.hash(); + let b = b.hash(); + + // then + assert_eq!(a, hex("a9c321be8c24ba4dc2bd73f5300bde67dc57228ab8b68b607bb4c39c5374fac9")); + assert_eq!(b, hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd")); + } + + #[test] + fn compact_should_work() { + // given + let a = Test::Data("Hello World!".into()); + let b = Test::Data("".into()); + + // when + let c: TestCompact = Compact::new((a.clone(), b.clone())); + let d: TestCompact = Compact::new(( + Test::Hash(a.hash()), + Test::Hash(b.hash()), + )); + + // then + assert_eq!(c.hash(), d.hash()); + } + + #[test] + fn compact_should_encode_decode_correctly() { + // given + let a = Test::Data("Hello World!".into()); + let b = Test::Data("".into()); + + let c: TestCompact = Compact::new((a.clone(), b.clone())); + let d: TestCompact = Compact::new(( + Test::Hash(a.hash()), + Test::Hash(b.hash()), + )); + let cases = vec![c, d.clone()]; + + // when + let encoded_compact = cases + .iter() + .map(|c| c.using_encoded(|x| x.to_vec(), true)) + .collect::>(); + + let encoded = cases + .iter() + .map(|c| c.using_encoded(|x| x.to_vec(), false)) + .collect::>(); + + let decoded_compact = encoded_compact + .iter() + .map(|x| TestCompact::decode(&mut &**x)) + .collect::>(); + + let decoded = encoded + .iter() + .map(|x| TestCompact::decode(&mut &**x)) + .collect::>(); + + // then + assert_eq!(decoded, cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>()); + + assert_eq!(decoded_compact, vec![Ok(d.clone()), Ok(d.clone())]); + } + + 
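The `compact_should_work` and related tests above exercise the core property of `DataOrHash`/`Compact`: because each tuple element is hashed first, any element can later be replaced by its hash without changing the commitment. Below is a standalone sketch of that property; it uses `std`'s `DefaultHasher` and plain byte concatenation instead of Keccak256 and SCALE encoding, so the digests differ from the pallet's, but the interchangeability argument is the same.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

/// Stand-in hash function (the pallet's tests use Keccak256; this sketch only
/// needs some deterministic 8-byte digest).
fn digest(bytes: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    h.write(bytes);
    h.finish()
}

/// Simplified `DataOrHash`: either the full payload or its digest.
#[derive(Clone, Debug, PartialEq)]
enum DataOrHash {
    Data(Vec<u8>),
    Hash(u64),
}

impl DataOrHash {
    /// Hashing the `Data` variant yields the same value the `Hash` variant
    /// would carry, so both forms are interchangeable inside a commitment.
    fn hash(&self) -> u64 {
        match self {
            DataOrHash::Data(d) => digest(d),
            DataOrHash::Hash(h) => *h,
        }
    }
}

/// Simplified `Compact` of a pair: the leaf commits to the *hashes* of the
/// tuple elements, so either element may later be pruned to its hash.
fn compact_pair_hash(a: &DataOrHash, b: &DataOrHash) -> u64 {
    let mut bytes = a.hash().to_le_bytes().to_vec();
    bytes.extend_from_slice(&b.hash().to_le_bytes());
    digest(&bytes)
}

fn main() {
    let a = DataOrHash::Data(b"Hello World!".to_vec());
    let b = DataOrHash::Data(Vec::new());

    // Full form and "pruned" form (second element replaced by its hash)
    // commit to the same value, mirroring `compact_should_work` above.
    let full = compact_pair_hash(&a, &b);
    let pruned = compact_pair_hash(&a, &DataOrHash::Hash(b.hash()));
    assert_eq!(full, pruned);
    println!("compact hash = {:#x}", full);
}
```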
#[test] + fn opaque_leaves_should_be_scale_compatible_with_concrete_ones() { + // given + let a = Test::Data("Hello World!".into()); + let b = Test::Data("".into()); + + let c: TestCompact = Compact::new((a.clone(), b.clone())); + let d: TestCompact = Compact::new(( + Test::Hash(a.hash()), + Test::Hash(b.hash()), + )); + let cases = vec![c, d.clone()]; + + let encoded_compact = cases + .iter() + .map(|c| c.using_encoded(|x| x.to_vec(), true)) + .map(OpaqueLeaf::from_encoded_leaf) + .collect::>(); + + let opaque = cases + .iter() + .map(OpaqueLeaf::from_leaf) + .collect::>(); + + // then + assert_eq!( + encoded_compact, + opaque, + ); + } +} diff --git a/frame/merkle-mountain-range/src/benchmarking.rs b/frame/merkle-mountain-range/src/benchmarking.rs new file mode 100644 index 0000000000000..e6b3cf7f2172c --- /dev/null +++ b/frame/merkle-mountain-range/src/benchmarking.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the MMR pallet. + +#![cfg_attr(not(feature = "std"), no_std)] + +use crate::*; +use frame_support::traits::OnInitialize; +use frame_benchmarking::benchmarks; +use sp_std::prelude::*; + +benchmarks! { + on_initialize { + let x in 1 .. 1_000; + + let leaves = x as u64; + }: { + for b in 0..leaves { + Module::::on_initialize((b as u32).into()); + } + } verify { + assert_eq!(crate::NumberOfLeaves::::get(), leaves); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::*; + use crate::tests::new_test_ext; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_on_initialize::()); + }) + } +} diff --git a/frame/merkle-mountain-range/src/default_weights.rs b/frame/merkle-mountain-range/src/default_weights.rs new file mode 100644 index 0000000000000..98bb404e3f3a1 --- /dev/null +++ b/frame/merkle-mountain-range/src/default_weights.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Default weights for the MMR Pallet +//! This file was not auto-generated. 
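The `on_initialize` weight defined just below scales with the number of MMR peaks. As a refresher on the arithmetic the pallet relies on (see `NodesUtils` further down in this diff): an MMR with `n` leaves has `popcount(n)` peaks and `2*n - popcount(n)` nodes in total. A standalone sketch cross-checking the same leaf counts and sizes as the utils tests:

```rust
/// Number of peaks in an MMR with `leaves` leaves: one peak per set bit.
fn number_of_peaks(leaves: u64) -> u64 {
    leaves.count_ones() as u64
}

/// Total number of nodes (leaves plus inner nodes) in the MMR.
fn size(leaves: u64) -> u64 {
    2 * leaves - number_of_peaks(leaves)
}

fn main() {
    // Same leaf counts and expected sizes as `should_calculate_the_size_correctly`
    // in the utils tests later in this diff.
    let leaves = [0u64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 21];
    let expected = [0u64, 1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 39];
    for (l, e) in leaves.iter().zip(expected.iter()) {
        assert_eq!(size(*l), *e);
    }
    println!("peaks(21) = {}, size(21) = {}", number_of_peaks(21), size(21));
}
```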
+ +use frame_support::weights::{ + Weight, constants::{WEIGHT_PER_NANOS, RocksDbWeight as DbWeight}, +}; + +impl crate::WeightInfo for () { + fn on_initialize(peaks: u64) -> Weight { + // Reading the parent hash. + let leaf_weight = DbWeight::get().reads(1); + // Blake2 hash cost. + let hash_weight = 2 * WEIGHT_PER_NANOS; + // No-op hook. + let hook_weight = 0; + + leaf_weight + .saturating_add(hash_weight) + .saturating_add(hook_weight) + .saturating_add(DbWeight::get().reads_writes( + 2 + peaks, + 2 + peaks, + )) + } +} diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs new file mode 100644 index 0000000000000..b137be7b53c1a --- /dev/null +++ b/frame/merkle-mountain-range/src/lib.rs @@ -0,0 +1,256 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Merkle Mountain Range +//! +//! ## Overview +//! +//! Details on Merkle Mountain Ranges (MMRs) can be found here: +//! +//! +//! The MMR pallet constructs a MMR from leaf data obtained on every block from +//! `LeafDataProvider`. MMR nodes are stored both in: +//! - on-chain storage - hashes only; not full leaf content) +//! - off-chain storage - via Indexing API we push full leaf content (and all internal nodes as +//! well) to the Off-chain DB, so that the data is available for Off-chain workers. +//! Hashing used for MMR is configurable independently from the rest of the runtime (i.e. not using +//! `frame_system::Hashing`) so something compatible with external chains can be used (like +//! Keccak256 for Ethereum compatibility). +//! +//! Depending on the usage context (off-chain vs on-chain) the pallet is able to: +//! - verify MMR leaf proofs (on-chain) +//! - generate leaf proofs (off-chain) +//! +//! See [primitives::Compact] documentation for how you can optimize proof size for leafs that are +//! composed from multiple elements. +//! +//! ## What for? +//! +//! Primary use case for this pallet is to generate MMR root hashes, that can latter on be used by +//! BEEFY protocol (see ). +//! MMR root hashes along with BEEFY will make it possible to build Super Light Clients (SLC) of +//! Substrate-based chains. The SLC will be able to follow finality and can be shown proofs of more +//! details that happened on the source chain. +//! In that case the chain which contains the pallet generates the Root Hashes and Proofs, which +//! are then presented to another chain acting as a light client which can verify them. +//! +//! Secondary use case is to archive historical data, but still be able to retrieve them on-demand +//! if needed. For instance if parent block hashes are stored in the MMR it's possible at any point +//! in time to provide a MMR proof about some past block hash, while this data can be safely pruned +//! from on-chain storage. +//! +//! NOTE This pallet is experimental and not proven to work in production. +//! 
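To make the on-chain/off-chain split described in the overview concrete: every appended node results in two writes, a hash into chain state and the full SCALE-encoded node into the off-chain DB under a key derived from the indexing prefix and the node position. The following is a standalone model of that dual write; the names, the `b"mmr-"` prefix, and the hash/key encodings are simplifications for illustration, not the pallet's actual storage layout.

```rust
use std::collections::BTreeMap;

/// Standalone model of the pallet's dual storage: on-chain we keep only node
/// hashes, while full node bodies go to an off-chain key-value store via the
/// indexing API, keyed by (prefix, position).
struct DualStore {
    onchain_hashes: BTreeMap<u64, u64>,         // position -> node hash
    offchain_nodes: BTreeMap<Vec<u8>, Vec<u8>>, // offchain_key(position) -> full encoding
}

/// The pallet SCALE-encodes `(INDEXING_PREFIX, pos)`; plain concatenation is
/// enough for this sketch.
fn offchain_key(prefix: &[u8], pos: u64) -> Vec<u8> {
    let mut key = prefix.to_vec();
    key.extend_from_slice(&pos.to_le_bytes());
    key
}

/// Stand-in digest (the pallet hashes with the configured `Hashing` type).
fn digest(bytes: &[u8]) -> u64 {
    use std::hash::Hasher;
    let mut h = std::collections::hash_map::DefaultHasher::new();
    h.write(bytes);
    h.finish()
}

impl DualStore {
    fn new() -> Self {
        Self { onchain_hashes: BTreeMap::new(), offchain_nodes: BTreeMap::new() }
    }

    /// Append one node: chain state only learns the hash, while the off-chain
    /// DB learns everything, so off-chain workers can later build proofs.
    fn append(&mut self, pos: u64, full_node: &[u8]) {
        self.onchain_hashes.insert(pos, digest(full_node));
        self.offchain_nodes.insert(offchain_key(b"mmr-", pos), full_node.to_vec());
    }
}

fn main() {
    let mut store = DualStore::new();
    store.append(0, b"(parent_number, parent_hash) leaf body");
    assert_eq!(store.onchain_hashes.len(), 1);
    assert_eq!(store.offchain_nodes.len(), 1);
    println!("on-chain entries: {:?}", store.onchain_hashes);
}
```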
+#![cfg_attr(not(feature = "std"), no_std)] + +use codec::Encode; +use frame_support::{ + decl_module, decl_storage, + weights::Weight, +}; +use sp_runtime::traits; + +mod default_weights; +mod mmr; +#[cfg(any(feature = "runtime-benchmarks", test))] +mod benchmarking; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +pub use pallet_mmr_primitives as primitives; + +pub trait WeightInfo { + fn on_initialize(peaks: u64) -> Weight; +} + +/// This pallet's configuration trait +pub trait Config: frame_system::Config { + /// Prefix for elements stored in the Off-chain DB via Indexing API. + /// + /// Each node of the MMR is inserted both on-chain and off-chain via Indexing API. + /// The former does not store full leaf content, just it's compact version (hash), + /// and some of the inner mmr nodes might be pruned from on-chain storage. + /// The later will contain all the entries in their full form. + /// + /// Each node is stored in the Off-chain DB under key derived from the [`Self::INDEXING_PREFIX`] and + /// it's in-tree index (MMR position). + const INDEXING_PREFIX: &'static [u8]; + + /// A hasher type for MMR. + /// + /// To construct trie nodes that result in merging (bagging) two peaks, depending on the node + /// kind we take either: + /// - The node (hash) itself if it's an inner node. + /// - The hash of SCALE-encoding of the leaf data if it's a leaf node. + /// + /// Then we create a tuple of these two hashes, SCALE-encode it (concatenate) and + /// hash, to obtain a new MMR inner node - the new peak. + type Hashing: traits::Hash>::Hash>; + + /// The hashing output type. + /// + /// This type is actually going to be stored in the MMR. + /// Required to be provided again, to satisfy trait bounds for storage items. + type Hash: traits::Member + traits::MaybeSerializeDeserialize + sp_std::fmt::Debug + + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + codec::Codec + + codec::EncodeLike; + + /// Data stored in the leaf nodes. + /// + /// The [LeafData](primitives::LeafDataProvider) is responsible for returning the entire leaf + /// data that will be inserted to the MMR. + /// [LeafDataProvider](primitives::LeafDataProvider)s can be composed into tuples to put + /// multiple elements into the tree. In such a case it might be worth using [primitives::Compact] + /// to make MMR proof for one element of the tuple leaner. + /// + /// Note that the leaf at each block MUST be unique. You may want to include a block hash or block + /// number as an easiest way to ensure that. + type LeafData: primitives::LeafDataProvider; + + /// A hook to act on the new MMR root. + /// + /// For some applications it might be beneficial to make the MMR root available externally + /// apart from having it in the storage. For instance you might output it in the header digest + /// (see [frame_system::Module::deposit_log]) to make it available for Light Clients. + /// Hook complexity should be `O(1)`. + type OnNewRoot: primitives::OnNewRoot<>::Hash>; + + /// Weights for this pallet. + type WeightInfo: WeightInfo; +} + +decl_storage! { + trait Store for Module, I: Instance = DefaultInstance> as MerkleMountainRange { + /// Latest MMR Root hash. + pub RootHash get(fn mmr_root_hash): >::Hash; + + /// Current size of the MMR (number of leaves). + pub NumberOfLeaves get(fn mmr_leaves): u64; + + /// Hashes of the nodes in the MMR. + /// + /// Note this collection only contains MMR peaks, the inner nodes (and leaves) + /// are pruned and only stored in the Offchain DB. 
+ pub Nodes get(fn mmr_peak): map hasher(identity) u64 => Option<>::Hash>; + } +} + +decl_module! { + /// A public part of the pallet. + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { + fn on_initialize(n: T::BlockNumber) -> Weight { + use primitives::LeafDataProvider; + let leaves = Self::mmr_leaves(); + let peaks_before = mmr::utils::NodesUtils::new(leaves).number_of_peaks(); + let data = T::LeafData::leaf_data(); + // append new leaf to MMR + let mut mmr: ModuleMmr = mmr::Mmr::new(leaves); + mmr.push(data).expect("MMR push never fails."); + + // update the size + let (leaves, root) = mmr.finalize().expect("MMR finalize never fails."); + >::on_new_root(&root); + + ::put(leaves); + >::put(root); + + let peaks_after = mmr::utils::NodesUtils::new(leaves).number_of_peaks(); + T::WeightInfo::on_initialize(peaks_before.max(peaks_after)) + } + } +} + +/// A MMR specific to the pallet. +type ModuleMmr = mmr::Mmr>; + +/// Leaf data. +type LeafOf = <>::LeafData as primitives::LeafDataProvider>::LeafData; + +/// Hashing used for the pallet. +pub(crate) type HashingOf = >::Hashing; + +/// Stateless MMR proof verification. +/// +/// This function can be used to verify received MMR proof (`proof`) +/// for given leaf data (`leaf`) against a known MMR root hash (`root`). +/// +/// The verification does not require any storage access. +pub fn verify_leaf_proof( + root: H::Output, + leaf: mmr::Node, + proof: primitives::Proof, +) -> Result<(), primitives::Error> where + H: traits::Hash, + L: primitives::FullLeaf, +{ + let is_valid = mmr::verify_leaf_proof::(root, leaf, proof)?; + if is_valid { + Ok(()) + } else { + Err(primitives::Error::Verify.log_debug(("The proof is incorrect.", root))) + } +} + +impl, I: Instance> Module { + fn offchain_key(pos: u64) -> sp_std::prelude::Vec { + (T::INDEXING_PREFIX, pos).encode() + } + + /// Generate a MMR proof for the given `leaf_index`. + /// + /// Note this method can only be used from an off-chain context + /// (Offchain Worker or Runtime API call), since it requires + /// all the leaves to be present. + /// It may return an error or panic if used incorrectly. + pub fn generate_proof(leaf_index: u64) -> Result< + (LeafOf, primitives::Proof<>::Hash>), + primitives::Error, + > { + let mmr: ModuleMmr = mmr::Mmr::new(Self::mmr_leaves()); + mmr.generate_proof(leaf_index) + } + + /// Verify MMR proof for given `leaf`. + /// + /// This method is safe to use within the runtime code. + /// It will return `Ok(())` if the proof is valid + /// and an `Err(..)` if MMR is inconsistent (some leaves are missing) + /// or the proof is invalid. + pub fn verify_leaf( + leaf: LeafOf, + proof: primitives::Proof<>::Hash>, + ) -> Result<(), primitives::Error> { + if proof.leaf_count > Self::mmr_leaves() + || proof.leaf_count == 0 + || proof.items.len() as u32 > mmr::utils::NodesUtils::new(proof.leaf_count).depth() + { + return Err(primitives::Error::Verify.log_debug( + "The proof has incorrect number of leaves or proof items." + )); + } + + let mmr: ModuleMmr = mmr::Mmr::new(proof.leaf_count); + let is_valid = mmr.verify_leaf_proof(leaf, proof)?; + if is_valid { + Ok(()) + } else { + Err(primitives::Error::Verify.log_debug("The proof is incorrect.")) + } + } +} diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs new file mode 100644 index 0000000000000..a3d373bfd2e9e --- /dev/null +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -0,0 +1,176 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + Config, HashingOf, Instance, + mmr::{ + Node, NodeOf, Hasher, + storage::{Storage, OffchainStorage, RuntimeStorage}, + utils::NodesUtils, + }, + primitives::{self, Error}, +}; +#[cfg(not(feature = "std"))] +use sp_std::vec; + +/// Stateless verification of the leaf proof. +pub fn verify_leaf_proof( + root: H::Output, + leaf: Node, + proof: primitives::Proof, +) -> Result where + H: sp_runtime::traits::Hash, + L: primitives::FullLeaf, +{ + let size = NodesUtils::new(proof.leaf_count).size(); + let leaf_position = mmr_lib::leaf_index_to_pos(proof.leaf_index); + + let p = mmr_lib::MerkleProof::< + Node, + Hasher, + >::new( + size, + proof.items.into_iter().map(Node::Hash).collect(), + ); + p.verify( + Node::Hash(root), + vec![(leaf_position, leaf)], + ).map_err(|e| Error::Verify.log_debug(e)) +} + +/// A wrapper around a MMR library to expose limited functionality. +/// +/// Available functions depend on the storage kind ([Runtime](crate::mmr::storage::RuntimeStorage) +/// vs [Off-chain](crate::mmr::storage::OffchainStorage)). +pub struct Mmr where + T: Config, + I: Instance, + L: primitives::FullLeaf, + Storage: mmr_lib::MMRStore>, +{ + mmr: mmr_lib::MMR< + NodeOf, + Hasher, L>, + Storage + >, + leaves: u64, +} + +impl Mmr where + T: Config, + I: Instance, + L: primitives::FullLeaf, + Storage: mmr_lib::MMRStore>, +{ + /// Create a pointer to an existing MMR with given number of leaves. + pub fn new(leaves: u64) -> Self { + let size = NodesUtils::new(leaves).size(); + Self { + mmr: mmr_lib::MMR::new(size, Default::default()), + leaves, + } + } + + /// Verify proof of a single leaf. + pub fn verify_leaf_proof( + &self, + leaf: L, + proof: primitives::Proof<>::Hash>, + ) -> Result { + let p = mmr_lib::MerkleProof::< + NodeOf, + Hasher, L>, + >::new( + self.mmr.mmr_size(), + proof.items.into_iter().map(Node::Hash).collect(), + ); + let position = mmr_lib::leaf_index_to_pos(proof.leaf_index); + let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; + p.verify( + root, + vec![(position, Node::Data(leaf))], + ).map_err(|e| Error::Verify.log_debug(e)) + } + + /// Return the internal size of the MMR (number of nodes). + #[cfg(test)] + pub fn size(&self) -> u64 { + self.mmr.mmr_size() + } +} + +/// Runtime specific MMR functions. +impl Mmr where + T: Config, + I: Instance, + L: primitives::FullLeaf, +{ + + /// Push another item to the MMR. + /// + /// Returns element position (index) in the MMR. + pub fn push(&mut self, leaf: L) -> Option { + let position = self.mmr.push(Node::Data(leaf)) + .map_err(|e| Error::Push.log_error(e)) + .ok()?; + + self.leaves += 1; + + Some(position) + } + + /// Commit the changes to underlying storage, return current number of leaves and + /// calculate the new MMR's root hash. 
+ pub fn finalize(self) -> Result<(u64, >::Hash), Error> { + let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; + self.mmr.commit().map_err(|e| Error::Commit.log_error(e))?; + Ok((self.leaves, root.hash())) + } +} + +/// Off-chain specific MMR functions. +impl Mmr where + T: Config, + I: Instance, + L: primitives::FullLeaf + codec::Decode, +{ + /// Generate a proof for given leaf index. + /// + /// Proof generation requires all the nodes (or their hashes) to be available in the storage. + /// (i.e. you can't run the function in the pruned storage). + pub fn generate_proof(&self, leaf_index: u64) -> Result< + (L, primitives::Proof<>::Hash>), + Error + > { + let position = mmr_lib::leaf_index_to_pos(leaf_index); + let store = >::default(); + let leaf = match mmr_lib::MMRStore::get_elem(&store, position) { + Ok(Some(Node::Data(leaf))) => leaf, + e => return Err(Error::LeafNotFound.log_debug(e)), + }; + let leaf_count = self.leaves; + self.mmr.gen_proof(vec![position]) + .map_err(|e| Error::GenerateProof.log_error(e)) + .map(|p| primitives::Proof { + leaf_index, + leaf_count, + items: p.proof_items().iter().map(|x| x.hash()).collect(), + }) + .map(|p| (leaf, p)) + } +} + diff --git a/frame/merkle-mountain-range/src/mmr/mod.rs b/frame/merkle-mountain-range/src/mmr/mod.rs new file mode 100644 index 0000000000000..e705b247067e5 --- /dev/null +++ b/frame/merkle-mountain-range/src/mmr/mod.rs @@ -0,0 +1,45 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod storage; +pub mod utils; +mod mmr; + +use crate::primitives::FullLeaf; +use sp_runtime::traits; + +pub use self::mmr::{Mmr, verify_leaf_proof}; + +/// Node type for runtime `T`. +pub type NodeOf = Node<>::Hashing, L>; + +/// A node stored in the MMR. +pub type Node = crate::primitives::DataOrHash; + +/// Default Merging & Hashing behavior for MMR. +pub struct Hasher(sp_std::marker::PhantomData<(H, L)>); + +impl mmr_lib::Merge for Hasher { + type Item = Node; + + fn merge(left: &Self::Item, right: &Self::Item) -> Self::Item { + let mut concat = left.hash().as_ref().to_vec(); + concat.extend_from_slice(right.hash().as_ref()); + + Node::Hash(::hash(&concat)) + } +} diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs new file mode 100644 index 0000000000000..0bff53f2fb057 --- /dev/null +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -0,0 +1,112 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A MMR storage implementations. + +use codec::Encode; +use crate::mmr::{NodeOf, Node}; +use crate::{NumberOfLeaves, Nodes, Module, Config, Instance, primitives}; +use frame_support::{StorageMap, StorageValue}; +#[cfg(not(feature = "std"))] +use sp_std::prelude::Vec; + +/// A marker type for runtime-specific storage implementation. +/// +/// Allows appending new items to the MMR and proof verification. +/// MMR nodes are appended to two different storages: +/// 1. We add nodes (leaves) hashes to the on-chain storge (see [crate::Nodes]). +/// 2. We add full leaves (and all inner nodes as well) into the `IndexingAPI` during block +/// processing, so the values end up in the Offchain DB if indexing is enabled. +pub struct RuntimeStorage; + +/// A marker type for offchain-specific storage implementation. +/// +/// Allows proof generation and verification, but does not support appending new items. +/// MMR nodes are assumed to be stored in the Off-Chain DB. Note this storage type +/// DOES NOT support adding new items to the MMR. +pub struct OffchainStorage; + +/// A storage layer for MMR. +/// +/// There are two different implementations depending on the use case. +/// See docs for [RuntimeStorage] and [OffchainStorage]. +pub struct Storage( + sp_std::marker::PhantomData<(StorageType, T, I, L)> +); + +impl Default for Storage { + fn default() -> Self { + Self(Default::default()) + } +} + +impl mmr_lib::MMRStore> for Storage where + T: Config, + I: Instance, + L: primitives::FullLeaf + codec::Decode, +{ + fn get_elem(&self, pos: u64) -> mmr_lib::Result>> { + let key = Module::::offchain_key(pos); + // Retrieve the element from Off-chain DB. + Ok( + sp_io::offchain ::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) + .and_then(|v| codec::Decode::decode(&mut &*v).ok()) + ) + } + + fn append(&mut self, _: u64, _: Vec>) -> mmr_lib::Result<()> { + panic!("MMR must not be altered in the off-chain context.") + } +} + +impl mmr_lib::MMRStore> for Storage where + T: Config, + I: Instance, + L: primitives::FullLeaf, +{ + fn get_elem(&self, pos: u64) -> mmr_lib::Result>> { + Ok(>::get(pos) + .map(Node::Hash) + ) + } + + fn append(&mut self, pos: u64, elems: Vec>) -> mmr_lib::Result<()> { + let mut leaves = crate::NumberOfLeaves::::get(); + let mut size = crate::mmr::utils::NodesUtils::new(leaves).size(); + if pos != size { + return Err(mmr_lib::Error::InconsistentStore); + } + + for elem in elems { + // on-chain we only store the hash (even if it's a leaf) + >::insert(size, elem.hash()); + // Indexing API is used to store the full leaf content. + elem.using_encoded(|elem| { + sp_io::offchain_index::set(&Module::::offchain_key(size), elem) + }); + size += 1; + + if let Node::Data(..) = elem { + leaves += 1; + } + } + + NumberOfLeaves::::put(leaves); + + Ok(()) + } +} diff --git a/frame/merkle-mountain-range/src/mmr/utils.rs b/frame/merkle-mountain-range/src/mmr/utils.rs new file mode 100644 index 0000000000000..e966367b71f23 --- /dev/null +++ b/frame/merkle-mountain-range/src/mmr/utils.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Merkle Mountain Range utilities. + +/// MMR nodes & size -related utilities. +pub struct NodesUtils { + no_of_leaves: u64, +} + +impl NodesUtils { + /// Create new instance of MMR nodes utilities for given number of leaves. + pub fn new(no_of_leaves: u64) -> Self { + Self { no_of_leaves } + } + + /// Calculate number of peaks in the MMR. + pub fn number_of_peaks(&self) -> u64 { + self.number_of_leaves().count_ones() as u64 + } + + /// Return the number of leaves in the MMR. + pub fn number_of_leaves(&self) -> u64 { + self.no_of_leaves + } + + /// Calculate the total size of MMR (number of nodes). + pub fn size(&self) -> u64 { + 2 * self.no_of_leaves - self.number_of_peaks() + } + + /// Calculate maximal depth of the MMR. + pub fn depth(&self) -> u32 { + if self.no_of_leaves == 0 { + return 0 + } + + 64 - self.no_of_leaves + .next_power_of_two() + .leading_zeros() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_calculate_number_of_leaves_correctly() { + assert_eq!( + vec![0, 1, 2, 3, 4, 9, 15, 21] + .into_iter() + .map(|n| NodesUtils::new(n).depth()) + .collect::>(), + vec![0, 1, 2, 3, 3, 5, 5, 6] + ); + } + + #[test] + fn should_calculate_depth_correclty() { + assert_eq!( + vec![0, 1, 2, 3, 4, 9, 15, 21] + .into_iter() + .map(|n| NodesUtils::new(n).number_of_leaves()) + .collect::>(), + vec![0, 1, 2, 3, 4, 9, 15, 21] + ); + } + + #[test] + fn should_calculate_number_of_peaks_correctly() { + assert_eq!( + vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 21] + .into_iter() + .map(|n| NodesUtils::new(n).number_of_peaks()) + .collect::>(), + vec![0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 3] + ); + } + + #[test] + fn should_calculate_the_size_correctly() { + let _ = env_logger::try_init(); + + let leaves = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 21]; + let sizes = vec![0, 1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 39]; + assert_eq!( + leaves + .clone() + .into_iter() + .map(|n| NodesUtils::new(n).size()) + .collect::>(), + sizes.clone() + ); + + // size cross-check + let mut actual_sizes = vec![]; + for s in &leaves[1..] { + crate::tests::new_test_ext().execute_with(|| { + let mut mmr = crate::mmr::Mmr::< + crate::mmr::storage::RuntimeStorage, + crate::mock::Test, + crate::DefaultInstance, + _, + >::new(0); + for i in 0..*s { + mmr.push(i); + } + actual_sizes.push(mmr.size()); + }) + } + assert_eq!( + sizes[1..], + actual_sizes[..], + ); + } +} diff --git a/frame/merkle-mountain-range/src/mock.rs b/frame/merkle-mountain-range/src/mock.rs new file mode 100644 index 0000000000000..0adb0294d5080 --- /dev/null +++ b/frame/merkle-mountain-range/src/mock.rs @@ -0,0 +1,111 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use crate as pallet_mmr; + +use codec::{Encode, Decode}; +use frame_support::parameter_types; +use pallet_mmr_primitives::{LeafDataProvider, Compact}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{ + BlakeTwo256, Keccak256, IdentityLookup, + }, +}; +use sp_std::cell::RefCell; +use sp_std::prelude::*; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + MMR: pallet_mmr::{Module, Call, Storage}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; +} +impl frame_system::Config for Test { + type BaseCallFilter = (); + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = sp_core::sr25519::Public; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); +} + +impl Config for Test { + const INDEXING_PREFIX: &'static [u8] = b"mmr-"; + + type Hashing = Keccak256; + type Hash = H256; + type LeafData = Compact, LeafData)>; + type OnNewRoot = (); + type WeightInfo = (); +} + +#[derive(Encode, Decode, Clone, Default, Eq, PartialEq, Debug)] +pub struct LeafData { + pub a: u64, + pub b: Vec, +} + +impl LeafData { + pub fn new(a: u64) -> Self { + Self { + a, + b: Default::default(), + } + } +} + +thread_local! { + pub static LEAF_DATA: RefCell = RefCell::new(Default::default()); +} + +impl LeafDataProvider for LeafData { + type LeafData = Self; + + fn leaf_data() -> Self::LeafData { + LEAF_DATA.with(|r| r.borrow().clone()) + } +} diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs new file mode 100644 index 0000000000000..63e4ec2257066 --- /dev/null +++ b/frame/merkle-mountain-range/src/tests.rs @@ -0,0 +1,303 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use crate::mock::*; + +use frame_support::traits::OnInitialize; +use sp_core::{ + H256, + offchain::{ + testing::TestOffchainExt, + OffchainExt, + }, +}; +use pallet_mmr_primitives::{Proof, Compact}; + +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + frame_system::GenesisConfig::default().build_storage::().unwrap().into() +} + +fn register_offchain_ext(ext: &mut sp_io::TestExternalities) { + let (offchain, _offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db()); + ext.register_extension(OffchainExt::new(offchain)); +} + +fn new_block() -> u64 { + let number = frame_system::Module::::block_number() + 1; + let hash = H256::repeat_byte(number as u8); + LEAF_DATA.with(|r| r.borrow_mut().a = number); + + frame_system::Module::::initialize( + &number, + &hash, + &Default::default(), + frame_system::InitKind::Full, + ); + MMR::on_initialize(number) +} + +pub(crate) fn hex(s: &str) -> H256 { + s.parse().unwrap() +} + +type BlockNumber = ::BlockNumber; + +fn decode_node(v: Vec) -> mmr::Node< + ::Hashing, + ((BlockNumber, H256), LeafData), +> { + use crate::primitives::DataOrHash; + type A = DataOrHash::<::Hashing, (BlockNumber, H256)>; + type B = DataOrHash::<::Hashing, LeafData>; + type Node = mmr::Node<::Hashing, (A, B)>; + let tuple: Node = codec::Decode::decode(&mut &v[..]).unwrap(); + + match tuple { + mmr::Node::Data((DataOrHash::Data(a), DataOrHash::Data(b))) => mmr::Node::Data((a, b)), + mmr::Node::Hash(hash) => mmr::Node::Hash(hash), + _ => unreachable!(), + } +} + +fn init_chain(blocks: usize) { + // given + for _ in 0..blocks { + new_block(); + } +} + +#[test] +fn should_start_empty() { + let _ = env_logger::try_init(); + new_test_ext().execute_with(|| { + // given + assert_eq!( + crate::RootHash::::get(), + "0000000000000000000000000000000000000000000000000000000000000000".parse().unwrap() + ); + assert_eq!(crate::NumberOfLeaves::::get(), 0); + assert_eq!(crate::Nodes::::get(0), None); + + // when + let weight = new_block(); + + // then + assert_eq!(crate::NumberOfLeaves::::get(), 1); + assert_eq!(crate::Nodes::::get(0), + Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0"))); + assert_eq!( + crate::RootHash::::get(), + hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0") + ); + assert!(weight != 0); + }); +} + +#[test] +fn should_append_to_mmr_when_on_initialize_is_called() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + ext.execute_with(|| { + // when + new_block(); + new_block(); + + // then + assert_eq!(crate::NumberOfLeaves::::get(), 2); + assert_eq!(( + crate::Nodes::::get(0), + crate::Nodes::::get(1), + crate::Nodes::::get(2), + crate::Nodes::::get(3), + crate::RootHash::::get(), + ), ( + Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")), + Some(hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705")), + Some(hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")), + None, + hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), + )); + }); + + // make sure the leaves end up in the offchain DB + ext.persist_offchain_overlay(); + let offchain_db = ext.offchain_db(); + assert_eq!(offchain_db.get(&MMR::offchain_key(0)).map(decode_node), Some(mmr::Node::Data(( + (0, H256::repeat_byte(1)), + LeafData::new(1), + )))); + assert_eq!(offchain_db.get(&MMR::offchain_key(1)).map(decode_node), 
Some(mmr::Node::Data(( + (1, H256::repeat_byte(2)), + LeafData::new(2), + )))); + assert_eq!(offchain_db.get(&MMR::offchain_key(2)).map(decode_node), Some(mmr::Node::Hash( + hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854") + ))); + assert_eq!(offchain_db.get(&MMR::offchain_key(3)), None); +} + +#[test] +fn should_construct_larger_mmr_correctly() { + let _ = env_logger::try_init(); + new_test_ext().execute_with(|| { + // when + init_chain(7); + + // then + assert_eq!(crate::NumberOfLeaves::::get(), 7); + assert_eq!(( + crate::Nodes::::get(0), + crate::Nodes::::get(10), + crate::RootHash::::get(), + ), ( + Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")), + Some(hex("611c2174c6164952a66d985cfe1ec1a623794393e3acff96b136d198f37a648c")), + hex("e45e25259f7930626431347fa4dd9aae7ac83b4966126d425ca70ab343709d2c"), + )); + }); +} + +#[test] +fn should_generate_proofs_correctly() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + // given + ext.execute_with(|| init_chain(7)); + ext.persist_offchain_overlay(); + + // Try to generate proofs now. This requires the offchain extensions to be present + // to retrieve full leaf data. + register_offchain_ext(&mut ext); + ext.execute_with(|| { + // when generate proofs for all leaves + let proofs = (0_u64..crate::NumberOfLeaves::::get()) + .into_iter() + .map(|leaf_index| crate::Module::::generate_proof(leaf_index).unwrap()) + .collect::>(); + + // then + assert_eq!(proofs[0], (Compact::new(( + (0, H256::repeat_byte(1)).into(), + LeafData::new(1).into(), + )), Proof { + leaf_index: 0, + leaf_count: 7, + items: vec![ + hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705"), + hex("cb24f4614ad5b2a5430344c99545b421d9af83c46fd632d70a332200884b4d46"), + hex("dca421199bdcc55bb773c6b6967e8d16675de69062b52285ca63685241fdf626"), + ], + })); + assert_eq!(proofs[4], (Compact::new(( + (4, H256::repeat_byte(5)).into(), + LeafData::new(5).into(), + )), Proof { + leaf_index: 4, + leaf_count: 7, + items: vec![ + hex("ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252"), + hex("8ed25570209d8f753d02df07c1884ddb36a3d9d4770e4608b188322151c657fe"), + hex("611c2174c6164952a66d985cfe1ec1a623794393e3acff96b136d198f37a648c"), + ], + })); + assert_eq!(proofs[6], (Compact::new(( + (6, H256::repeat_byte(7)).into(), + LeafData::new(7).into(), + )), Proof { + leaf_index: 6, + leaf_count: 7, + items: vec![ + hex("ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252"), + hex("7e4316ae2ebf7c3b6821cb3a46ca8b7a4f9351a9b40fcf014bb0a4fd8e8f29da"), + ], + })); + }); +} + +#[test] +fn should_verify() { + let _ = env_logger::try_init(); + + // Start off with chain initialisation and storing indexing data off-chain + // (MMR Leafs) + let mut ext = new_test_ext(); + ext.execute_with(|| init_chain(7)); + ext.persist_offchain_overlay(); + + // Try to generate proof now. This requires the offchain extensions to be present + // to retrieve full leaf data. + register_offchain_ext(&mut ext); + let (leaf, proof5) = ext.execute_with(|| { + // when + crate::Module::::generate_proof(5).unwrap() + }); + + // Now to verify the proof, we really shouldn't require offchain storage or extension. + // Hence we initialize the storage once again, using different externalities and then + // verify. 
+ let mut ext2 = new_test_ext(); + ext2.execute_with(|| { + init_chain(7); + // then + assert_eq!(crate::Module::::verify_leaf(leaf, proof5), Ok(())); + }); +} + +#[test] +fn verification_should_be_stateless() { + let _ = env_logger::try_init(); + + // Start off with chain initialisation and storing indexing data off-chain + // (MMR Leafs) + let mut ext = new_test_ext(); + ext.execute_with(|| init_chain(7)); + ext.persist_offchain_overlay(); + + // Try to generate proof now. This requires the offchain extensions to be present + // to retrieve full leaf data. + register_offchain_ext(&mut ext); + let (leaf, proof5) = ext.execute_with(|| { + // when + crate::Module::::generate_proof(5).unwrap() + }); + let root = ext.execute_with(|| crate::Module::::mmr_root_hash()); + + // Verify proof without relying on any on-chain data. + let leaf = crate::primitives::DataOrHash::Data(leaf); + assert_eq!(crate::verify_leaf_proof::<::Hashing, _>(root, leaf, proof5), Ok(())); +} + +#[test] +fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + // given + ext.execute_with(|| init_chain(7)); + + ext.persist_offchain_overlay(); + register_offchain_ext(&mut ext); + + ext.execute_with(|| { + // when + let (leaf, proof5) = crate::Module::::generate_proof(5).unwrap(); + new_block(); + + // then + assert_eq!(crate::Module::::verify_leaf(leaf, proof5), Ok(())); + }); +} diff --git a/frame/metadata/Cargo.toml b/frame/metadata/Cargo.toml index 2934b15562c43..cede8a836123d 100644 --- a/frame/metadata/Cargo.toml +++ b/frame/metadata/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-metadata" -version = "12.0.0" +version = "13.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,10 +13,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/metadata/src/lib.rs b/frame/metadata/src/lib.rs index 109f33f420191..a63da82ca00db 100644 --- a/frame/metadata/src/lib.rs +++ b/frame/metadata/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -53,7 +53,7 @@ pub enum DecodeDifferent where B: 'static, O: 'static { } impl Encode for DecodeDifferent where B: Encode + 'static, O: Encode + 'static { - fn encode_to(&self, dest: &mut W) { + fn encode_to(&self, dest: &mut W) { match self { DecodeDifferent::Encode(b) => b.encode_to(dest), DecodeDifferent::Decoded(o) => o.encode_to(dest), @@ -139,7 +139,7 @@ pub struct FunctionArgumentMetadata { pub struct FnEncode(pub fn() -> E) where E: Encode + 'static; impl Encode for FnEncode { - fn encode_to(&self, dest: &mut W) { + fn encode_to(&self, dest: &mut W) { self.0().encode_to(dest); } } @@ -238,7 +238,7 @@ pub struct DefaultByteGetter(pub &'static dyn DefaultByte); pub type ByteGetter = DecodeDifferent>; impl Encode for DefaultByteGetter { - fn encode_to(&self, dest: &mut W) { + fn encode_to(&self, dest: &mut W) { self.0.default_byte().encode_to(dest) } } @@ -374,7 +374,7 @@ pub enum RuntimeMetadata { pub enum RuntimeMetadataDeprecated { } impl Encode for RuntimeMetadataDeprecated { - fn encode_to(&self, _dest: &mut W) {} + fn encode_to(&self, _dest: &mut W) {} } impl codec::EncodeLike for RuntimeMetadataDeprecated {} diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index 2be66ebb722c1..e8d625138371e 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-multisig" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,19 +14,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/multisig/README.md b/frame/multisig/README.md index 
--- a/frame/multisig/README.md
+++ b/frame/multisig/README.md
@@ -24,6 +24,6 @@ not available or desired.
 * `cancel_as_multi` - Cancel a call from a composite origin.
 
 [`Call`]: ./enum.Call.html
-[`Trait`]: ./trait.Trait.html
+[`Config`]: ./trait.Config.html
 
-License: Apache-2.0
\ No newline at end of file
+License: Apache-2.0
diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs
index bf89ec8b09bd4..748223072b99b 100644
--- a/frame/multisig/src/benchmarking.rs
+++ b/frame/multisig/src/benchmarking.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -29,7 +29,7 @@ use crate::Module as Multisig;
 
 const SEED: u32 = 0;
 
-fn setup_multi<T: Trait>(s: u32, z: u32)
+fn setup_multi<T: Config>(s: u32, z: u32)
 	-> Result<(Vec<T::AccountId>, Vec<u8>), &'static str>
 {
 	let mut signatories: Vec<T::AccountId> = Vec::new();
@@ -42,20 +42,18 @@ fn setup_multi<T: Trait>(s: u32, z: u32)
 	}
 	signatories.sort();
 	// Must first convert to outer call type.
-	let call: <T as Trait>::Call = frame_system::Call::<T>::remark(vec![0; z as usize]).into();
+	let call: <T as Config>::Call = frame_system::Call::<T>::remark(vec![0; z as usize]).into();
 	let call_data = call.encode();
 	return Ok((signatories, call_data))
 }
 
 benchmarks! {
-	_ { }
-
 	as_multi_threshold_1 {
 		// Transaction Length
 		let z in 0 .. 10_000;
 		let max_signatories = T::MaxSignatories::get().into();
 		let (mut signatories, _) = setup_multi::<T>(max_signatories, z)?;
-		let call: <T as Trait>::Call = frame_system::Call::<T>::remark(vec![0; z as usize]).into();
+		let call: <T as Config>::Call = frame_system::Call::<T>::remark(vec![0; z as usize]).into();
 		let call_hash = call.using_encoded(blake2_256);
 		let multi_account_id = Multisig::<T>::multi_account_id(&signatories, 1);
 		let caller = signatories.pop().ok_or("signatories should have len 2 or more")?;
diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs
index 873508259a8d7..a015f291bc716 100644
--- a/frame/multisig/src/lib.rs
+++ b/frame/multisig/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,7 +18,7 @@
 //! # Multisig Module
 //! A module for doing multisig dispatch.
 //!
-//! - [`multisig::Trait`](./trait.Trait.html)
+//! - [`multisig::Config`](./trait.Config.html)
 //! - [`Call`](./enum.Call.html)
 //!
 //! ## Overview
@@ -41,7 +41,7 @@
 //! * `cancel_as_multi` - Cancel a call from a composite origin.
 //!
 //! [`Call`]: ./enum.Call.html
-//! [`Trait`]: ./trait.Trait.html
+//! [`Config`]: ./trait.Config.html
 
 // Ensure we're `no_std` when compiling for Wasm.
 #![cfg_attr(not(feature = "std"), no_std)]
@@ -62,14 +62,14 @@ use frame_system::{self as system, ensure_signed, RawOrigin};
 use sp_runtime::{DispatchError, DispatchResult, traits::{Dispatchable, Zero}};
 pub use weights::WeightInfo;
 
-type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::Balance;
+type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
 
 /// Just a bunch of bytes, but they should decode to a valid `Call`.
 pub type OpaqueCall = Vec<u8>;
 
 /// Configuration trait.
-pub trait Trait: frame_system::Trait {
+pub trait Config: frame_system::Config {
 	/// The overarching event type.
-	type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;
+	type Event: From<Event<Self>> + Into<<Self as frame_system::Config>::Event>;
 
 	/// The overarching call type.
 	type Call: Parameter + Dispatchable<Origin=Self::Origin, PostInfo=PostDispatchInfo>
@@ -123,7 +123,7 @@ pub struct Multisig<BlockNumber, Balance, AccountId> {
 }
 
 decl_storage! {
-	trait Store for Module<T: Trait> as Multisig {
+	trait Store for Module<T: Config> as Multisig {
 		/// The set of open multisig operations.
 		pub Multisigs: double_map
 			hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) [u8; 32]
@@ -134,7 +134,7 @@ decl_storage! {
 }
 
 decl_error! {
-	pub enum Error for Module<T: Trait> {
+	pub enum Error for Module<T: Config> {
 		/// Threshold must be 2 or greater.
 		MinimumThreshold,
 		/// Call is already approved by this signatory.
@@ -169,8 +169,8 @@ decl_error! {
 decl_event! {
 	/// Events type.
 	pub enum Event<T> where
-		AccountId = <T as frame_system::Trait>::AccountId,
-		BlockNumber = <T as frame_system::Trait>::BlockNumber,
+		AccountId = <T as frame_system::Config>::AccountId,
+		BlockNumber = <T as frame_system::Config>::BlockNumber,
 		CallHash = [u8; 32]
 	{
 		/// A new multisig operation has begun. \[approving, multisig, call_hash\]
@@ -191,7 +191,7 @@ enum CallOrHash {
 }
 
 decl_module! {
-	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+	pub struct Module<T: Config> for enum Call where origin: T::Origin {
 		type Error = Error<T>;
 
 		/// Deposit one of this module's events by using the default implementation.
@@ -223,16 +223,19 @@ decl_module! {
 		/// - DB Weight: None
 		/// - Plus Call Weight
 		/// #
-		#[weight = (
-			T::WeightInfo::as_multi_threshold_1(call.using_encoded(|c| c.len() as u32))
-				.saturating_add(call.get_dispatch_info().weight)
-				// AccountData for inner call origin accountdata.
-				.saturating_add(T::DbWeight::get().reads_writes(1, 1)),
-			call.get_dispatch_info().class,
-		)]
+		#[weight = {
+			let dispatch_info = call.get_dispatch_info();
+			(
+				T::WeightInfo::as_multi_threshold_1(call.using_encoded(|c| c.len() as u32))
+					.saturating_add(dispatch_info.weight)
+					// AccountData for inner call origin accountdata.
+					.saturating_add(T::DbWeight::get().reads_writes(1, 1)),
+				dispatch_info.class,
+			)
+		}]
 		fn as_multi_threshold_1(origin, other_signatories: Vec<T::AccountId>,
-			call: Box<<T as Trait>::Call>,
+			call: Box<<T as Config>::Call>,
 		) -> DispatchResultWithPostInfo {
 			let who = ensure_signed(origin)?;
 			let max_sigs = T::MaxSignatories::get() as usize;
@@ -443,7 +446,7 @@ decl_module! {
 	}
 }
 
-impl<T: Trait> Module<T> {
+impl<T: Config> Module<T> {
 	/// Derive a multi-account ID from the sorted list of accounts and the threshold that are
 	/// required.
 	///
@@ -615,7 +618,7 @@ impl<T: Trait> Module<T> {
 	}
 
 	/// Attempt to decode and return the call, provided by the user or from storage.
-	fn get_call(hash: &[u8; 32], maybe_known: Option<&[u8]>) -> Option<(<T as Trait>::Call, usize)> {
+	fn get_call(hash: &[u8; 32], maybe_known: Option<&[u8]>) -> Option<(<T as Config>::Call, usize)> {
 		maybe_known.map_or_else(|| {
 			Calls::<T>::get(hash).and_then(|(data, ..)| {
 				Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len()))
diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs
index ca15e04597eaa..78301b2b69f77 100644
--- a/frame/multisig/src/tests.rs
+++ b/frame/multisig/src/tests.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,45 +22,37 @@ use super::*;
 
 use frame_support::{
-	assert_ok, assert_noop, impl_outer_origin, parameter_types, impl_outer_dispatch,
-	weights::Weight, impl_outer_event, traits::Filter,
+	assert_ok, assert_noop, parameter_types, traits::Filter,
 };
 use sp_core::H256;
-use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header};
-use crate as multisig;
-
-impl_outer_origin! {
-	pub enum Origin for Test where system = frame_system {}
-}
-
-impl_outer_event! {
-	pub enum TestEvent for Test {
-		system,
-		pallet_balances,
-		multisig,
-	}
-}
-impl_outer_dispatch! {
-	pub enum Call for Test where origin: Origin {
-		frame_system::System,
-		pallet_balances::Balances,
-		multisig::Multisig,
+use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header};
+use crate as pallet_multisig;
+
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+type Block = frame_system::mocking::MockBlock<Test>;
+
+frame_support::construct_runtime!(
+	pub enum Test where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Module, Call, Config, Storage, Event<T>},
+		Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
+		Multisig: pallet_multisig::{Module, Call, Storage, Event<T>},
 	}
-}
+);
 
-// For testing the pallet, we construct most of a mock runtime. This means
-// first constructing a configuration type (`Test`) which `impl`s each of the
-// configuration traits of pallets we want to use.
-#[derive(Clone, Eq, PartialEq)]
-pub struct Test;
 parameter_types! {
 	pub const BlockHashCount: u64 = 250;
-	pub const MaximumBlockWeight: Weight = 1024;
-	pub const MaximumBlockLength: u32 = 2 * 1024;
-	pub const AvailableBlockRatio: Perbill = Perbill::one();
+	pub BlockWeights: frame_system::limits::BlockWeights =
+		frame_system::limits::BlockWeights::simple_max(1024);
 }
-impl frame_system::Trait for Test {
+impl frame_system::Config for Test {
 	type BaseCallFilter = TestBaseCallFilter;
+	type BlockWeights = ();
+	type BlockLength = ();
+	type DbWeight = ();
 	type Origin = Origin;
 	type Index = u64;
 	type BlockNumber = u64;
@@ -70,29 +62,23 @@ impl frame_system::Trait for Test {
 	type AccountId = u64;
 	type Lookup = IdentityLookup<Self::AccountId>;
 	type Header = Header;
-	type Event = TestEvent;
+	type Event = Event;
 	type BlockHashCount = BlockHashCount;
-	type MaximumBlockWeight = MaximumBlockWeight;
-	type DbWeight = ();
-	type BlockExecutionWeight = ();
-	type ExtrinsicBaseWeight = ();
-	type MaximumExtrinsicWeight = MaximumBlockWeight;
-	type MaximumBlockLength = MaximumBlockLength;
-	type AvailableBlockRatio = AvailableBlockRatio;
 	type Version = ();
-	type PalletInfo = ();
+	type PalletInfo = PalletInfo;
 	type AccountData = pallet_balances::AccountData<u64>;
 	type OnNewAccount = ();
 	type OnKilledAccount = ();
 	type SystemWeightInfo = ();
+	type SS58Prefix = ();
 }
 parameter_types!
{ pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; - type Event = TestEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -114,8 +100,8 @@ impl Filter for TestBaseCallFilter { } } } -impl Trait for Test { - type Event = TestEvent; +impl Config for Test { + type Event = Event; type Call = Call; type Currency = Balances; type DepositBase = DepositBase; @@ -123,9 +109,6 @@ impl Trait for Test { type MaxSignatories = MaxSignatories; type WeightInfo = (); } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Multisig = Module; use pallet_balances::Call as BalancesCall; use pallet_balances::Error as BalancesError; @@ -140,11 +123,11 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -fn last_event() -> TestEvent { +fn last_event() -> Event { system::Module::::events().pop().map(|e| e.event).expect("Event expected") } -fn expect_event>(e: E) { +fn expect_event>(e: E) { assert_eq!(last_event(), e.into()); } diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs index ab55b181f5a50..f67e0c8868afa 100644 --- a/frame/multisig/src/weights.rs +++ b/frame/multisig/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -58,7 +58,7 @@ pub trait WeightInfo { /// Weights for pallet_multisig using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn as_multi_threshold_1(z: u32, ) -> Weight { (14_183_000 as Weight) .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 8f348d665b7e3..611f492b81f24 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-nicks" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,16 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] 
-sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/nicks/README.md b/frame/nicks/README.md index b4c88eff43152..766108470bedf 100644 --- a/frame/nicks/README.md +++ b/frame/nicks/README.md @@ -20,6 +20,6 @@ have not been designed to be economically secure. Do not use this pallet as-is i * `kill_name` - Forcibly remove the associated name; the deposit is lost. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index ddeadfb7680fe..681a45626fbca 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ //! # Nicks Module //! -//! - [`nicks::Trait`](./trait.Trait.html) +//! - [`nicks::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -37,7 +37,7 @@ //! * `kill_name` - Forcibly remove the associated name; the deposit is lost. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html #![cfg_attr(not(feature = "std"), no_std)] @@ -51,12 +51,12 @@ use frame_support::{ }; use frame_system::ensure_signed; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The currency trait. type Currency: ReservableCurrency; @@ -78,14 +78,14 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as Nicks { + trait Store for Module as Nicks { /// The lookup table for names. NameOf: map hasher(twox_64_concat) T::AccountId => Option<(Vec, BalanceOf)>; } } decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { + pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { /// A name was set. \[who\] NameSet(AccountId), /// A name was forcibly set. \[target\] @@ -101,7 +101,7 @@ decl_event!( decl_error! { /// Error for the nicks module. - pub enum Error for Module { + pub enum Error for Module { /// A name is too short. TooShort, /// A name is too long. @@ -113,7 +113,7 @@ decl_error! { decl_module! { /// Nicks module declaration. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -239,63 +239,66 @@ decl_module! 
{ #[cfg(test)] mod tests { use super::*; + use crate as pallet_nicks; - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - ord_parameter_types - }; + use frame_support::{assert_ok, assert_noop, parameter_types, ord_parameter_types}; use sp_core::H256; use frame_system::EnsureSignedBy; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, + testing::Header, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, }; - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Nicks: pallet_nicks::{Module, Call, Storage, Event}, + } + ); - #[derive(Clone, Eq, PartialEq)] - pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; - type Event = (); + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -309,8 +312,8 @@ mod tests { ord_parameter_types! 
{ pub const One: u64 = 1; } - impl Trait for Test { - type Event = (); + impl Config for Test { + type Event = Event; type Currency = Balances; type ReservationFee = ReservationFee; type Slashed = (); @@ -318,9 +321,6 @@ mod tests { type MinLength = MinLength; type MaxLength = MaxLength; } - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Nicks = Module; fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); diff --git a/frame/node-authorization/Cargo.toml b/frame/node-authorization/Cargo.toml index 1448e99bd2a14..db77f25c18871 100644 --- a/frame/node-authorization/Cargo.toml +++ b/frame/node-authorization/Cargo.toml @@ -13,13 +13,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } [features] default = ["std"] diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 91f89ad1d9100..f1f70e9eacd4b 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -76,9 +76,9 @@ impl WeightInfo for () { fn remove_connections() -> Weight { 50_000_000 } } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The event type of this module. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The maximum number of well known nodes that are allowed to set type MaxWellKnownNodes: Get; @@ -103,7 +103,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as NodeAuthorization { + trait Store for Module as NodeAuthorization { /// The set of well known nodes. This is stored sorted (just by value). pub WellKnownNodes get(fn well_known_nodes): BTreeSet; /// A map that maintains the ownership of each node. @@ -123,7 +123,7 @@ decl_storage! { decl_event! { pub enum Event where - ::AccountId, + ::AccountId, { /// The given well known node was added. 
NodeAdded(PeerId, AccountId), @@ -149,7 +149,7 @@ decl_event! { decl_error! { /// Error for the node authorization module. - pub enum Error for Module { + pub enum Error for Module { /// The PeerId is too long. PeerIdTooLong, /// Too many well known nodes. @@ -170,7 +170,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// The maximum number of authorized well known nodes const MaxWellKnownNodes: u32 = T::MaxWellKnownNodes::get(); @@ -267,7 +267,7 @@ decl_module! { pub fn reset_well_known_nodes(origin, nodes: Vec<(PeerId, T::AccountId)>) { T::ResetOrigin::ensure_origin(origin)?; ensure!(nodes.len() < T::MaxWellKnownNodes::get() as usize, Error::::TooManyNodes); - + Self::initialize_nodes(&nodes); Self::deposit_event(RawEvent::NodesReset(nodes)); @@ -280,7 +280,7 @@ decl_module! { #[weight = T::WeightInfo::claim_node()] pub fn claim_node(origin, node: PeerId) { let sender = ensure_signed(origin)?; - + ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); ensure!(!Owners::::contains_key(&node),Error::::AlreadyClaimed); @@ -403,7 +403,7 @@ decl_module! { } } -impl Module { +impl Module { fn initialize_nodes(nodes: &Vec<(PeerId, T::AccountId)>) { let peer_ids = nodes.iter() .map(|item| item.0.clone()) @@ -431,54 +431,55 @@ impl Module { #[cfg(test)] mod tests { use super::*; + use crate as pallet_node_authorization; - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, weights::Weight, - parameter_types, ord_parameter_types, - }; + use frame_support::{assert_ok, assert_noop, parameter_types, ord_parameter_types}; use frame_system::EnsureSignedBy; use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; + use sp_runtime::{traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; + + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + NodeAuthorization: pallet_node_authorization::{ + Module, Call, Storage, Config, Event, + }, + } + ); parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type DbWeight = (); + type BlockWeights = (); + type BlockLength = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } ord_parameter_types! { @@ -491,8 +492,8 @@ mod tests { pub const MaxWellKnownNodes: u32 = 4; pub const MaxPeerIdLength: u32 = 2; } - impl Trait for Test { - type Event = (); + impl Config for Test { + type Event = Event; type MaxWellKnownNodes = MaxWellKnownNodes; type MaxPeerIdLength = MaxPeerIdLength; type AddOrigin = EnsureSignedBy; @@ -502,15 +503,13 @@ mod tests { type WeightInfo = (); } - type NodeAuthorization = Module; - fn test_node(id: u8) -> PeerId { PeerId(vec![id]) } fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { + pallet_node_authorization::GenesisConfig:: { nodes: vec![(test_node(10), 10), (test_node(20), 20), (test_node(30), 30)], }.assimilate_storage(&mut t).unwrap(); t.into() diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index c5c8881007c22..3232d5f3ae5dd 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-offences" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,18 +13,18 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +pallet-balances = { version = "3.0.0", default-features = false, path = "../balances" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "3.0.0", default-features = false, path = 
"../../primitives/staking" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index 7a95cebc4fb21..a27b6c3012e3d 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-offences-benchmarking" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,27 +13,27 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../benchmarking" } -frame-support = { version = "2.0.0", default-features = false, path = "../../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../../system" } -pallet-babe = { version = "2.0.0", default-features = false, path = "../../babe" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../../balances" } -pallet-grandpa = { version = "2.0.0", default-features = false, path = "../../grandpa" } -pallet-im-online = { version = "2.0.0", default-features = false, path = "../../im-online" } -pallet-offences = { version = "2.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../offences" } -pallet-session = { version = "2.0.0", default-features = false, path = "../../session" } -pallet-staking = { version = "2.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../../primitives/staking" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../../benchmarking" } +frame-support = { version = "3.0.0", default-features = false, path = "../../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../../system" } +pallet-babe = { version = "3.0.0", default-features = false, path = "../../babe" } +pallet-balances = { version = "3.0.0", default-features = false, path = "../../balances" } +pallet-grandpa = { version = "3.0.0", default-features = false, path = "../../grandpa" } +pallet-im-online = { version = "3.0.0", default-features = false, path = "../../im-online" } +pallet-offences = { version = "3.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../offences" } +pallet-session = { version = "3.0.0", default-features = false, path = "../../session" } +pallet-staking = { version = "3.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "3.0.0", 
default-features = false, path = "../../../primitives/staking" } +sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } [dev-dependencies] -pallet-staking-reward-curve = { version = "2.0.0", path = "../../staking/reward-curve" } -pallet-timestamp = { version = "2.0.0", path = "../../timestamp" } +pallet-staking-reward-curve = { version = "3.0.0", path = "../../staking/reward-curve" } +pallet-timestamp = { version = "3.0.0", path = "../../timestamp" } serde = { version = "1.0.101" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-io = { version = "3.0.0", path = "../../../primitives/io" } [features] default = ["std"] diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 47055eab73d4a..57672f13ed711 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,22 +24,22 @@ mod mock; use sp_std::prelude::*; use sp_std::vec; -use frame_system::{RawOrigin, Module as System, Trait as SystemTrait}; +use frame_system::{RawOrigin, Module as System, Config as SystemConfig}; use frame_benchmarking::{benchmarks, account}; -use frame_support::traits::{Currency, OnInitialize}; +use frame_support::traits::{Currency, OnInitialize, ValidatorSet, ValidatorSetWithIdentification}; use sp_runtime::{Perbill, traits::{Convert, StaticLookup, Saturating, UniqueSaturatedInto}}; use sp_staking::offence::{ReportOffence, Offence, OffenceDetails}; -use pallet_balances::{Trait as BalancesTrait}; +use pallet_balances::Config as BalancesConfig; use pallet_babe::BabeEquivocationOffence; use pallet_grandpa::{GrandpaEquivocationOffence, GrandpaTimeSlot}; -use pallet_im_online::{Trait as ImOnlineTrait, Module as ImOnline, UnresponsivenessOffence}; -use pallet_offences::{Trait as OffencesTrait, Module as Offences}; -use pallet_session::historical::{Trait as HistoricalTrait, IdentificationTuple}; -use pallet_session::{Trait as SessionTrait, SessionManager}; +use pallet_im_online::{Config as ImOnlineConfig, Module as ImOnline, UnresponsivenessOffence}; +use pallet_offences::{Config as OffencesConfig, Module as Offences}; +use pallet_session::historical::{Config as HistoricalConfig, IdentificationTuple}; +use pallet_session::{Config as SessionConfig, SessionManager}; use pallet_staking::{ - Module as Staking, Trait as StakingTrait, RewardDestination, ValidatorPrefs, + Module as Staking, Config as StakingConfig, RewardDestination, ValidatorPrefs, Exposure, IndividualExposure, ElectionStatus, MAX_NOMINATIONS, Event as StakingEvent }; @@ -50,47 +50,47 @@ const MAX_OFFENDERS: u32 = 100; const MAX_NOMINATORS: u32 = 100; const MAX_DEFERRED_OFFENCES: u32 = 100; -pub struct Module(Offences); +pub struct Module(Offences); -pub trait Trait: - SessionTrait - + StakingTrait - + OffencesTrait - + ImOnlineTrait - + HistoricalTrait - + BalancesTrait +pub trait Config: + SessionConfig + + StakingConfig + + OffencesConfig + + ImOnlineConfig + + HistoricalConfig + + BalancesConfig + IdTupleConvert {} /// A helper trait to make sure we can convert `IdentificationTuple` coming from historical /// and the one 
required by offences. -pub trait IdTupleConvert { +pub trait IdTupleConvert { /// Convert identification tuple from `historical` trait to the one expected by `offences`. - fn convert(id: IdentificationTuple) -> ::IdentificationTuple; + fn convert(id: IdentificationTuple) -> ::IdentificationTuple; } -impl IdTupleConvert for T where - ::IdentificationTuple: From> +impl IdTupleConvert for T where + ::IdentificationTuple: From> { - fn convert(id: IdentificationTuple) -> ::IdentificationTuple { + fn convert(id: IdentificationTuple) -> ::IdentificationTuple { id.into() } } -type LookupSourceOf = <::Lookup as StaticLookup>::Source; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type LookupSourceOf = <::Lookup as StaticLookup>::Source; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -struct Offender { +struct Offender { pub controller: T::AccountId, pub stash: T::AccountId, pub nominator_stashes: Vec, } -fn bond_amount() -> BalanceOf { +fn bond_amount() -> BalanceOf { T::Currency::minimum_balance().saturating_mul(10_000u32.into()) } -fn create_offender(n: u32, nominators: u32) -> Result, &'static str> { +fn create_offender(n: u32, nominators: u32) -> Result, &'static str> { let stash: T::AccountId = account("stash", n, SEED); let controller: T::AccountId = account("controller", n, SEED); let controller_lookup: LookupSourceOf = T::Lookup::unlookup(controller.clone()); @@ -109,6 +109,7 @@ fn create_offender(n: u32, nominators: u32) -> Result, &'s let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), + .. Default::default() }; Staking::::validate(RawOrigin::Signed(controller.clone()).into(), validator_prefs)?; @@ -149,7 +150,7 @@ fn create_offender(n: u32, nominators: u32) -> Result, &'s Ok(Offender { controller, stash, nominator_stashes }) } -fn make_offenders(num_offenders: u32, num_nominators: u32) -> Result< +fn make_offenders(num_offenders: u32, num_nominators: u32) -> Result< (Vec>, Vec>), &'static str > { @@ -165,18 +166,46 @@ fn make_offenders(num_offenders: u32, num_nominators: u32) -> Result< let id_tuples = offenders.iter() .map(|offender| - ::ValidatorIdOf::convert(offender.controller.clone()) + ::ValidatorIdOf::convert(offender.controller.clone()) .expect("failed to get validator id from account id")) .map(|validator_id| - ::FullIdentificationOf::convert(validator_id.clone()) + ::FullIdentificationOf::convert(validator_id.clone()) .map(|full_id| (validator_id, full_id)) .expect("failed to convert validator id to full identification")) .collect::>>(); Ok((id_tuples, offenders)) } +fn make_offenders_im_online(num_offenders: u32, num_nominators: u32) -> Result< + (Vec>, Vec>), + &'static str +> { + Staking::::new_session(0); + + let mut offenders = vec![]; + for i in 0 .. 
num_offenders { + let offender = create_offender::(i + 1, num_nominators)?; + offenders.push(offender); + } + + Staking::::start_session(0); + + let id_tuples = offenders.iter() + .map(|offender| < + ::ValidatorSet as ValidatorSet + >::ValidatorIdOf::convert(offender.controller.clone()) + .expect("failed to get validator id from account id")) + .map(|validator_id| < + ::ValidatorSet as ValidatorSetWithIdentification + >::IdentificationOf::convert(validator_id.clone()) + .map(|full_id| (validator_id, full_id)) + .expect("failed to convert validator id to full identification")) + .collect::>>(); + Ok((id_tuples, offenders)) +} + #[cfg(test)] -fn check_events::Event>>(expected: I) { +fn check_events::Event>>(expected: I) { let events = System::::events() .into_iter() .map(|frame_system::EventRecord { event, .. }| event).collect::>(); let expected = expected.collect::>(); @@ -203,8 +232,6 @@ fn check_events::Event>>(expecte } benchmarks! { - _ { } - report_offence_im_online { let r in 1 .. MAX_REPORTERS; // we skip 1 offender, because in such case there is no slashing @@ -221,7 +248,7 @@ benchmarks! { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let (offenders, raw_offenders) = make_offenders::(o, n)?; + let (offenders, raw_offenders) = make_offenders_im_online::(o, n)?; let keys = ImOnline::::keys(); let validator_set_count = keys.len() as u32; @@ -235,7 +262,7 @@ benchmarks! { }; assert_eq!(System::::event_count(), 0); }: { - let _ = ::ReportUnresponsiveness::report_offence( + let _ = ::ReportUnresponsiveness::report_offence( reporters.clone(), offence ); @@ -250,14 +277,14 @@ benchmarks! { .flat_map(|offender| { core::iter::once(offender.stash).chain(offender.nominator_stashes.into_iter()) }) - .map(|stash| ::Event::from( + .map(|stash| ::Event::from( StakingEvent::::Slash(stash, BalanceOf::::from(slash_amount)) )) .collect::>(); let reward_events = reporters.into_iter() .flat_map(|reporter| vec![ frame_system::Event::::NewAccount(reporter.clone()).into(), - ::Event::from( + ::Event::from( pallet_balances::Event::::Endowed(reporter, (reward_amount / r).into()) ).into() ]); @@ -272,7 +299,7 @@ benchmarks! { .chain(slash_events.into_iter().map(Into::into)) .chain(reward_events) .chain(slash_rest.into_iter().map(Into::into)) - .chain(std::iter::once(::Event::from( + .chain(std::iter::once(::Event::from( pallet_offences::Event::Offence( UnresponsivenessOffence::::ID, 0_u32.to_le_bytes().to_vec(), @@ -332,7 +359,7 @@ benchmarks! { let keys = ImOnline::::keys(); let offence = BabeEquivocationOffence { - slot: 0, + slot: 0u64.into(), session_index: 0, validator_set_count: keys.len() as u32, offender: T::convert(offenders.pop().unwrap()), diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 527e0ede81ab9..e4ec32d0bc3bf 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,10 +26,10 @@ use frame_support::{ }; use frame_system as system; use sp_runtime::{ - traits::{IdentityLookup, Block as BlockT}, + traits::IdentityLookup, testing::{Header, UintAuthorityId}, }; - +use pallet_session::historical as pallet_session_historical; type AccountId = u64; type AccountIndex = u32; @@ -37,11 +37,15 @@ type BlockNumber = u64; type Balance = u64; parameter_types! { - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -53,24 +57,18 @@ impl frame_system::Trait for Test { type Header = sp_runtime::testing::Header; type Event = Event; type BlockHashCount = (); - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnKilledAccount = (Balances,); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); + type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: Balance = 10; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = Balance; type Event = Event; @@ -83,13 +81,13 @@ impl pallet_balances::Trait for Test { parameter_types! { pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -120,7 +118,7 @@ parameter_types! { pub const Offset: u64 = 0; } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions; @@ -132,6 +130,7 @@ impl pallet_session::Trait for Test { type DisabledValidatorsThreshold = (); type WeightInfo = (); } + pallet_staking_reward_curve::build! { const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( min_inflation: 0_025_000, @@ -149,7 +148,7 @@ parameter_types! 
{ pub type Extrinsic = sp_runtime::testing::TestXt; -impl pallet_staking::Trait for Test { +impl pallet_staking::Config for Test { type Currency = Balances; type UnixTime = pallet_timestamp::Module; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; @@ -174,9 +173,10 @@ impl pallet_staking::Trait for Test { type WeightInfo = (); } -impl pallet_im_online::Trait for Test { +impl pallet_im_online::Config for Test { type AuthorityId = UintAuthorityId; type Event = Event; + type ValidatorSet = Historical; type SessionDuration = Period; type ReportUnresponsiveness = Offences; type UnsignedPriority = (); @@ -184,10 +184,10 @@ impl pallet_im_online::Trait for Test { } parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * BlockWeights::get().max_block; } -impl pallet_offences::Trait for Test { +impl pallet_offences::Config for Test { type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; @@ -199,7 +199,7 @@ impl frame_system::offchain::SendTransactionTypes for Test where Call: Fro type OverarchingCall = Call; } -impl crate::Trait for Test {} +impl crate::Config for Test {} pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; @@ -216,6 +216,7 @@ frame_support::construct_runtime!( Session: pallet_session::{Module, Call, Storage, Event, Config}, ImOnline: pallet_im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, Offences: pallet_offences::{Module, Call, Storage, Event}, + Historical: pallet_session_historical::{Module}, } ); diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index e72498273cec0..5c1247853da1a 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -42,11 +42,11 @@ use codec::{Encode, Decode}; type OpaqueTimeSlot = Vec; /// A type alias for a report identifier. -type ReportIdOf = ::Hash; +type ReportIdOf = ::Hash; /// Type of data stored as a deferred offence pub type DeferredOffenceOf = ( - Vec::AccountId, ::IdentificationTuple>>, + Vec::AccountId, ::IdentificationTuple>>, Vec, SessionIndex, ); @@ -66,9 +66,9 @@ impl WeightInfo for () { } /// Offences trait -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From + Into<::Event>; + type Event: From + Into<::Event>; /// Full identification of the validator. type IdentificationTuple: Parameter + Ord; /// A handler called for every offence report. @@ -80,7 +80,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as Offences { + trait Store for Module as Offences { /// The primary structure that holds all offence records keyed by report identifiers. Reports get(fn reports): map hasher(twox_64_concat) ReportIdOf @@ -116,7 +116,7 @@ decl_event!( ); decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { fn deposit_event() = default; fn on_initialize(now: T::BlockNumber) -> Weight { @@ -158,7 +158,7 @@ decl_module! 
{ } } -impl> +impl> ReportOffence for Module where T::IdentificationTuple: Clone, @@ -210,7 +210,7 @@ where } } -impl Module { +impl Module { /// Tries (without checking) to report an offence. Stores them in [`DeferredOffences`] in case /// it fails. Returns false in case it has to store the offence. fn report_or_store_offence( @@ -293,7 +293,7 @@ impl Module { } } -struct TriageOutcome { +struct TriageOutcome { /// Other reports for the same report kinds. concurrent_offenders: Vec>, } @@ -304,13 +304,13 @@ struct TriageOutcome { /// This struct is responsible for aggregating storage writes and the underlying storage should not /// accessed directly meanwhile. #[must_use = "The changes are not saved without called `save`"] -struct ReportIndexStorage> { +struct ReportIndexStorage> { opaque_time_slot: OpaqueTimeSlot, concurrent_reports: Vec>, same_kind_reports: Vec<(O::TimeSlot, ReportIdOf)>, } -impl> ReportIndexStorage { +impl> ReportIndexStorage { /// Preload indexes from the storage for the specific `time_slot` and the kind of the offence. fn load(time_slot: &O::TimeSlot) -> Self { let opaque_time_slot = time_slot.encode(); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 58ee97a9bcbb5..c47a9cf943c18 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,7 +20,7 @@ #![cfg(test)] use std::cell::RefCell; -use crate::{Module, Trait}; +use crate::Config; use codec::Encode; use sp_runtime::Perbill; use sp_staking::{ @@ -31,14 +31,10 @@ use sp_runtime::testing::Header; use sp_runtime::traits::{IdentityLookup, BlakeTwo256}; use sp_core::H256; use frame_support::{ - impl_outer_origin, impl_outer_event, parameter_types, StorageMap, StorageDoubleMap, + parameter_types, StorageMap, StorageDoubleMap, weights::{Weight, constants::{WEIGHT_PER_SECOND, RocksDbWeight}}, }; -use frame_system as system; - -impl_outer_origin!{ - pub enum Origin for Runtime {} -} +use crate as offences; pub struct OnOffenceHandler; @@ -86,65 +82,62 @@ pub fn set_offence_weight(new: Weight) { OFFENCE_WEIGHT.with(|w| *w.borrow_mut() = new); } -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Runtime; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Offences: offences::{Module, Call, Storage, Event}, + } +); + parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); } -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = RocksDbWeight; type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = + Perbill::from_percent(60) * BlockWeights::get().max_block; } -impl Trait for Runtime { - type Event = TestEvent; +impl Config for Runtime { + type Event = Event; type IdentificationTuple = u64; type OnOffenceHandler = OnOffenceHandler; type WeightSoftLimit = OffencesWeightSoftLimit; } -mod offences { - pub use crate::Event; -} - -impl_outer_event! { - pub enum TestEvent for Runtime { - system, - offences, - } -} - pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); let mut ext = sp_io::TestExternalities::new(t); @@ -152,10 +145,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -/// Offences module. -pub type Offences = Module; -pub type System = frame_system::Module; - pub const KIND: [u8; 16] = *b"test_report_1234"; /// Returns all offence details for the specific `kind` happened at the specific time slot. diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index ca9f46a198820..2b7c500dfa2d9 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ use super::*; use crate::mock::{ - Offences, System, Offence, TestEvent, KIND, new_test_ext, with_on_offence_fractions, + Offences, System, Offence, Event, KIND, new_test_ext, with_on_offence_fractions, offence_reports, set_can_report, set_offence_weight, }; use sp_runtime::Perbill; @@ -132,7 +132,7 @@ fn should_deposit_event() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), + event: Event::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), topics: vec![], }] ); @@ -167,7 +167,7 @@ fn doesnt_deposit_event_for_dups() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), + event: Event::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), topics: vec![], }] ); @@ -304,7 +304,7 @@ fn should_queue_and_resubmit_rejected_offence() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::offences(crate::Event::Offence(KIND, 42u128.encode(), false)), + event: Event::offences(crate::Event::Offence(KIND, 42u128.encode(), false)), topics: vec![], }] ); @@ -342,7 +342,7 @@ fn weight_soft_limit_is_used() { new_test_ext().execute_with(|| { set_can_report(false); // Only 2 can fit in one block - set_offence_weight(::WeightSoftLimit::get() / 2); + set_offence_weight(::WeightSoftLimit::get() / 2); // Queue 3 offences // #1 diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 219e72502e0e4..9490364abd879 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-proxy" -version = "2.0.1" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,20 +14,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] 
-sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-utility = { version = "2.0.0", path = "../utility" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +pallet-balances = { version = "3.0.0", path = "../balances" } +pallet-utility = { version = "3.0.0", path = "../utility" } [features] default = ["std"] diff --git a/frame/proxy/README.md b/frame/proxy/README.md index 26969db638289..20c4d2bf20b82 100644 --- a/frame/proxy/README.md +++ b/frame/proxy/README.md @@ -16,6 +16,6 @@ reject the announcement and in doing so, veto the execution. ### Dispatchable Functions [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 5f1d79741dd8e..29c2e475c64ff 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,15 +27,15 @@ use crate::Module as Proxy; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::Event) { let events = frame_system::Module::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } -fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { +fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(|| whitelisted_caller()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); for i in 0..n { @@ -49,7 +49,7 @@ fn add_proxies(n: u32, maybe_who: Option) -> Result<(), Ok(()) } -fn add_announcements( +fn add_announcements( n: u32, maybe_who: Option, maybe_real: Option @@ -80,18 +80,14 @@ fn add_announcements( } benchmarks! { - _ { - let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; - } - proxy { - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); }: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call)) verify { assert_last_event::(RawEvent::ProxyExecuted(Ok(())).into()) @@ -99,14 +95,14 @@ benchmarks! { proxy_announced { let a in 0 .. T::MaxPending::get() - 1; - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("anonymous", 0, SEED); let delegate: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&delegate, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); Proxy::::announce( RawOrigin::Signed(delegate.clone()).into(), real.clone(), @@ -120,13 +116,13 @@ benchmarks! { remove_announcement { let a in 0 .. T::MaxPending::get() - 1; - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real.clone(), @@ -141,13 +137,13 @@ benchmarks! { reject_announcement { let a in 0 .. T::MaxPending::get() - 1; - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real.clone(), @@ -162,14 +158,14 @@ benchmarks! { announce { let a in 0 .. T::MaxPending::get() - 1; - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); add_announcements::(a, Some(caller.clone()), None)?; - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); let call_hash = T::CallHasher::hash_of(&call); }: _(RawOrigin::Signed(caller.clone()), real.clone(), call_hash) verify { @@ -177,7 +173,7 @@ benchmarks! { } add_proxy { - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); }: _( RawOrigin::Signed(caller.clone()), @@ -191,7 +187,7 @@ benchmarks! { } remove_proxy { - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); }: _( RawOrigin::Signed(caller.clone()), @@ -205,7 +201,7 @@ benchmarks! { } remove_proxies { - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); }: _(RawOrigin::Signed(caller.clone())) verify { @@ -214,7 +210,7 @@ benchmarks! { } anonymous { - let p in ...; + let p in 1 .. 
(T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); }: _( RawOrigin::Signed(caller.clone()), diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 75ab3902dc8db..1e5aaadcc62d3 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,7 @@ //! wish to execute some duration prior to execution happens. In this case, the target account may //! reject the announcement and in doing so, veto the execution. //! -//! - [`proxy::Trait`](./trait.Trait.html) +//! - [`proxy::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -33,7 +33,7 @@ //! ### Dispatchable Functions //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -55,17 +55,17 @@ use frame_system::{self as system, ensure_signed}; use frame_support::dispatch::DispatchError; pub use weights::WeightInfo; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// Configuration trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The overarching call type. type Call: Parameter + Dispatchable + GetDispatchInfo + From> + IsSubType> - + IsType<::Call>; + + IsType<::Call>; /// The currency mechanism. type Currency: ReservableCurrency; @@ -74,7 +74,7 @@ pub trait Trait: frame_system::Trait { /// The instance filter determines whether a given call may be proxied under this type. /// /// IMPORTANT: `Default` must be provided and MUST BE the the *most permissive* value. - type ProxyType: Parameter + Member + Ord + PartialOrd + InstanceFilter<::Call> + type ProxyType: Parameter + Member + Ord + PartialOrd + InstanceFilter<::Call> + Default; /// The base amount of currency needed to reserve for creating a proxy. @@ -137,10 +137,10 @@ pub struct Announcement { height: BlockNumber, } -type CallHashOf = <::CallHasher as Hash>::Output; +type CallHashOf = <::CallHasher as Hash>::Output; decl_storage! { - trait Store for Module as Proxy { + trait Store for Module as Proxy { /// The set of account proxies. Maps the account which has delegated to the accounts /// which are being delegated to, together with the amount held on deposit. pub Proxies get(fn proxies): map hasher(twox_64_concat) T::AccountId @@ -153,7 +153,7 @@ decl_storage! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// There are too many proxies registered or too many announcements pending. TooMany, /// Proxy registration not found. @@ -168,14 +168,16 @@ decl_error! { NoPermission, /// Announcement, if made at all, was made too recently. Unannounced, + /// Cannot add self as proxy. + NoSelfProxy, } } decl_event! { /// Events type. pub enum Event where - AccountId = ::AccountId, - ProxyType = ::ProxyType, + AccountId = ::AccountId, + ProxyType = ::ProxyType, Hash = CallHashOf, { /// A proxy was executed correctly, with the given \[result\]. @@ -189,7 +191,7 @@ decl_event! { } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// Deposit one of this module's events by using the default implementation. @@ -239,7 +241,7 @@ decl_module! { fn proxy(origin, real: T::AccountId, force_proxy_type: Option, - call: Box<::Call>, + call: Box<::Call>, ) { let who = ensure_signed(origin)?; let def = Self::find_proxy(&real, &who, force_proxy_type)?; @@ -509,7 +511,7 @@ decl_module! { delegate: T::AccountId, real: T::AccountId, force_proxy_type: Option, - call: Box<::Call>, + call: Box<::Call>, ) { ensure_signed(origin)?; let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; @@ -525,7 +527,7 @@ decl_module! { } } -impl Module { +impl Module { /// Calculate the address of an anonymous account. /// @@ -567,6 +569,7 @@ impl Module { proxy_type: T::ProxyType, delay: T::BlockNumber, ) -> DispatchResult { + ensure!(delegator != &delegatee, Error::::NoSelfProxy); Proxies::::try_mutate(delegator, |(ref mut proxies, ref mut deposit)| { ensure!(proxies.len() < T::MaxProxies::get() as usize, Error::::TooMany); let proxy_def = ProxyDefinition { delegate: delegatee, proxy_type, delay }; @@ -680,12 +683,12 @@ impl Module { fn do_proxy( def: ProxyDefinition, real: T::AccountId, - call: ::Call, + call: ::Call, ) { // This is a freshly authenticated new account, the origin restrictions doesn't apply. let mut origin: T::Origin = frame_system::RawOrigin::Signed(real).into(); - origin.add_filter(move |c: &::Call| { - let c = ::Call::from_ref(c); + origin.add_filter(move |c: &::Call| { + let c = ::Call::from_ref(c); // We make sure the proxy call does access this pallet to change modify proxies. match c.is_sub_type() { // Proxy call cannot add or remove a proxy with more permissions than it already has. @@ -706,7 +709,7 @@ impl Module { pub mod migration { use super::*; - /// Migration code for https://github.com/paritytech/substrate/pull/6770 + /// Migration code for /// /// Details: This migration was introduced between Substrate 2.0-RC6 and Substrate 2.0 releases. /// Before this migration, the `Proxies` storage item used a tuple of `AccountId` and @@ -714,7 +717,7 @@ pub mod migration { /// `ProxyDefinition` which additionally included a `BlockNumber` delay value. This function, /// simply takes any existing proxies using the old tuple format, and migrates it to the new /// struct by setting the delay to zero. - pub fn migrate_to_time_delayed_proxies() -> Weight { + pub fn migrate_to_time_delayed_proxies() -> Weight { Proxies::::translate::<(Vec<(T::AccountId, T::ProxyType)>, BalanceOf), _>( |_, (targets, deposit)| Some(( targets.into_iter() @@ -727,6 +730,6 @@ pub mod migration { deposit, )) ); - T::MaximumBlockWeight::get() + T::BlockWeights::get().max_block } } diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index bcf3b678ed644..b31ef1dfdb2fe 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
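The `pub mod migration` block above rewrites every `Proxies` entry in place via `StorageMap::translate` and charges a full block of weight for doing so. A sketch of that translate-based migration, assuming the pallet's own `Proxies`, `ProxyDefinition` and `BalanceOf<T>` types from earlier in this file:

```rust
use frame_support::weights::Weight;
use sp_runtime::traits::Zero;

/// Rewrite the old `(Vec<(AccountId, ProxyType)>, Balance)` storage format into
/// the new `ProxyDefinition` form, defaulting the added `delay` field to zero.
pub fn migrate_to_time_delayed_proxies<T: Config>() -> Weight {
    Proxies::<T>::translate::<(Vec<(T::AccountId, T::ProxyType)>, BalanceOf<T>), _>(
        |_key, (targets, deposit)| Some((
            targets.into_iter()
                .map(|(delegate, proxy_type)| ProxyDefinition {
                    delegate,
                    proxy_type,
                    delay: Zero::zero(),
                })
                .collect::<Vec<_>>(),
            deposit,
        ))
    );
    // Every `Proxies` entry is touched, so account for a full block.
    T::BlockWeights::get().max_block
}
```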
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,47 +22,39 @@ use super::*; use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, impl_outer_dispatch, - weights::Weight, impl_outer_event, RuntimeDebug, dispatch::DispatchError, traits::Filter, + assert_ok, assert_noop, parameter_types, RuntimeDebug, dispatch::DispatchError, traits::Filter, }; use codec::{Encode, Decode}; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use crate as proxy; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} -impl_outer_event! { - pub enum TestEvent for Test { - system, - pallet_balances, - proxy, - pallet_utility, - } -} -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - frame_system::System, - pallet_balances::Balances, - proxy::Proxy, - pallet_utility::Utility, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Proxy: proxy::{Module, Call, Storage, Event}, + Utility: pallet_utility::{Module, Call, Event}, } -} +); -// For testing the pallet, we construct most of a mock runtime. This means -// first constructing a configuration type (`Test`) which `impl`s each of the -// configuration traits of pallets we want to use. -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -72,36 +64,30 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; - type Event = TestEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } -impl pallet_utility::Trait for Test { - type Event = TestEvent; +impl pallet_utility::Config for Test { + type Event = Event; type Call = Call; type WeightInfo = (); } @@ -143,8 +129,8 @@ impl Filter for BaseFilter { } } } -impl Trait for Test { - type Event = TestEvent; +impl Config for Test { + type Event = Event; type Call = Call; type Currency = Balances; type ProxyType = ProxyType; @@ -158,11 +144,6 @@ impl Trait for Test { type AnnouncementDepositFactor = AnnouncementDepositFactor; } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Utility = pallet_utility::Module; -type Proxy = Module; - use frame_system::Call as SystemCall; use pallet_balances::Call as BalancesCall; use pallet_balances::Error as BalancesError; @@ -181,19 +162,19 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -fn last_event() -> TestEvent { +fn last_event() -> Event { system::Module::::events().pop().expect("Event expected").event } -fn expect_event>(e: E) { +fn expect_event>(e: E) { assert_eq!(last_event(), e.into()); } -fn last_events(n: usize) -> Vec { +fn last_events(n: usize) -> Vec { system::Module::::events().into_iter().rev().take(n).rev().map(|e| e.event).collect() } -fn expect_events(e: Vec) { +fn expect_events(e: Vec) { assert_eq!(last_events(e.len()), e); } @@ -321,7 +302,7 @@ fn proxy_announced_removes_announcement_and_returns_deposit() { #[test] fn filtering_works() { new_test_ext().execute_with(|| { - Balances::mutate_account(&1, |a| a.free = 1000); + assert!(Balances::mutate_account(&1, |a| a.free = 1000).is_ok()); assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0)); assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); @@ -335,7 +316,7 @@ fn filtering_works() { expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); let derivative_id = Utility::derivative_account_id(1, 0); - Balances::mutate_account(&derivative_id, |a| a.free = 1000); + assert!(Balances::mutate_account(&derivative_id, |a| a.free = 1000).is_ok()); let inner = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); let call = Box::new(Call::Utility(UtilityCall::as_derivative(0, inner.clone()))); @@ -401,6 +382,7 @@ fn add_remove_proxies_works() { assert_eq!(Balances::reserved_balance(1), 2); assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 0); + assert_noop!(Proxy::add_proxy(Origin::signed(1), 1, ProxyType::Any, 0), Error::::NoSelfProxy); }); } diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index 944fe53a149c9..92cf66120dfb3 100644 --- a/frame/proxy/src/weights.rs +++ b/frame/proxy/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -58,7 +58,7 @@ pub trait WeightInfo { /// Weights for pallet_proxy using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn proxy(p: u32, ) -> Weight { (32_194_000 as Weight) .saturating_add((215_000 as Weight).saturating_mul(p as Weight)) diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index d35f6960af5d9..285326ef1e9a6 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-randomness-collective-flip" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,15 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] safe-mix = { version = "1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +serde = { version = "1.0.101" } [features] default = ["std"] diff --git a/frame/randomness-collective-flip/README.md b/frame/randomness-collective-flip/README.md index 2af18d3d2f7b5..9885c734d9fad 100644 --- a/frame/randomness-collective-flip/README.md +++ b/frame/randomness-collective-flip/README.md @@ -22,10 +22,10 @@ the system trait. ```rust use frame_support::{decl_module, dispatch, traits::Randomness}; -pub trait Trait: frame_system::Trait {} +pub trait Config: frame_system::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn random_module_example(origin) -> dispatch::DispatchResult { let _random_value = >::random(&b"my context"[..]); diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index c1747669dab07..0dba6727da60a 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,10 +39,10 @@ //! ``` //! use frame_support::{decl_module, dispatch, traits::Randomness}; //! -//! pub trait Trait: frame_system::Trait {} +//! pub trait Config: frame_system::Config {} //! //! decl_module! { -//! 
pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn random_module_example(origin) -> dispatch::DispatchResult { //! let _random_value = >::random(&b"my context"[..]); @@ -63,18 +63,18 @@ use frame_support::{ }; use safe_mix::TripletMix; use codec::Encode; -use frame_system::Trait; +use frame_system::Config; const RANDOM_MATERIAL_LEN: u32 = 81; -fn block_number_to_index(block_number: T::BlockNumber) -> usize { +fn block_number_to_index(block_number: T::BlockNumber) -> usize { // on_initialize is called on the first block after genesis let index = (block_number - 1u32.into()) % RANDOM_MATERIAL_LEN.into(); index.try_into().ok().expect("Something % 81 is always smaller than usize; qed") } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { fn on_initialize(block_number: T::BlockNumber) -> Weight { let parent_hash = >::parent_hash(); @@ -91,7 +91,7 @@ decl_module! { } decl_storage! { - trait Store for Module as RandomnessCollectiveFlip { + trait Store for Module as RandomnessCollectiveFlip { /// Series of block headers from the last 81 blocks that acts as random seed material. This /// is arranged as a ring buffer with `block_number % 81` being the index into the `Vec` of /// the oldest hash. @@ -99,7 +99,7 @@ decl_storage! { } } -impl Randomness for Module { +impl Randomness for Module { /// This randomness uses a low-influence function, drawing upon the block hashes from the /// previous 81 blocks. Its result for any given subject will be known far in advance by anyone /// observing the chain. Any block producer has significant influence over their block hashes @@ -132,62 +132,63 @@ impl Randomness for Module { #[cfg(test)] mod tests { + use crate as pallet_randomness_collective_flip; use super::*; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, Header as _, IdentityLookup}, }; - use frame_support::{ - impl_outer_origin, parameter_types, weights::Weight, traits::{Randomness, OnInitialize}, - }; - - #[derive(Clone, PartialEq, Eq)] - pub struct Test; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } + use frame_system::limits; + use frame_support::{parameter_types, traits::{Randomness, OnInitialize}}; + + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + CollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage}, + } + ); parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights + ::simple_max(1024); + pub BlockLength: limits::BlockLength = limits::BlockLength + ::max(2 * 1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = BlockLength; + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } - type System = frame_system::Module; - type CollectiveFlip = Module; - fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); t.into() @@ -208,7 +209,6 @@ mod tests { &i, &parent_hash, &Default::default(), - &Default::default(), frame_system::InitKind::Full, ); CollectiveFlip::on_initialize(i); diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 0ba2f5437c614..80450db0bd39b 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-recovery" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +pallet-balances = { version = 
"3.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/recovery/README.md b/frame/recovery/README.md index b6d3ae5aceeb3..c45df2c666af6 100644 --- a/frame/recovery/README.md +++ b/frame/recovery/README.md @@ -131,4 +131,4 @@ of this pallet are: * `set_recovered` - The ROOT origin is able to skip the recovery process and directly allow one account to access another. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index c97824497fded..00cd6ff2a7f79 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ //! # Recovery Pallet //! -//! - [`recovery::Trait`](./trait.Trait.html) +//! - [`recovery::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -172,12 +172,12 @@ mod mock; mod tests; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; /// Configuration trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The overarching call type. type Call: Parameter + Dispatchable + GetDispatchInfo; @@ -237,7 +237,7 @@ pub struct RecoveryConfig { } decl_storage! { - trait Store for Module as Recovery { + trait Store for Module as Recovery { /// The set of recoverable accounts and their recovery configuration. pub Recoverable get(fn recovery_config): map hasher(twox_64_concat) T::AccountId @@ -262,7 +262,7 @@ decl_storage! { decl_event! { /// Events type. pub enum Event where - AccountId = ::AccountId, + AccountId = ::AccountId, { /// A recovery process has been set up for an \[account\]. RecoveryCreated(AccountId), @@ -284,7 +284,7 @@ decl_event! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// User is not allowed to make a call on behalf of this account NotAllowed, /// Threshold must be greater than zero @@ -317,11 +317,13 @@ decl_error! { Overflow, /// This account is already set up for recovery AlreadyProxy, + /// Some internal state is broken. + BadState, } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// The base amount of currency needed to reserve for creating a recovery configuration. @@ -352,16 +354,19 @@ decl_module! { /// - The weight of the `call` + 10,000. /// - One storage lookup to check account is recovered by `who`. O(1) /// # - #[weight = ( - call.get_dispatch_info().weight - .saturating_add(10_000) - // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)), - call.get_dispatch_info().class - )] + #[weight = { + let dispatch_info = call.get_dispatch_info(); + ( + dispatch_info.weight + .saturating_add(10_000) + // AccountData for inner call origin accountdata. 
+ .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + dispatch_info.class, + ) + }] fn as_recovered(origin, account: T::AccountId, - call: Box<::Call> + call: Box<::Call> ) -> DispatchResult { let who = ensure_signed(origin)?; // Check `who` is allowed to make a call on behalf of `account` @@ -583,9 +588,9 @@ decl_module! { recovery_config.threshold as usize <= active_recovery.friends.len(), Error::::Threshold ); + system::Module::::inc_consumers(&who).map_err(|_| Error::::BadState)?; // Create the recovery storage item Proxy::::insert(&who, &account); - system::Module::::inc_ref(&who); Self::deposit_event(RawEvent::AccountRecovered(account, who)); } @@ -672,12 +677,12 @@ decl_module! { // Check `who` is allowed to make a call on behalf of `account` ensure!(Self::proxy(&who) == Some(account), Error::::NotAllowed); Proxy::::remove(&who); - system::Module::::dec_ref(&who); + system::Module::::dec_consumers(&who); } } } -impl Module { +impl Module { /// Check that friends list is sorted and has no duplicates. fn is_sorted_and_unique(friends: &Vec) -> bool { friends.windows(2).all(|w| w[0] < w[1]) diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 35373562487f7..ee38b0e24cc60 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,47 +19,39 @@ use super::*; -use frame_support::{ - impl_outer_origin, impl_outer_dispatch, impl_outer_event, parameter_types, - weights::Weight, - traits::{OnInitialize, OnFinalize}, -}; +use frame_support::{parameter_types, traits::{OnInitialize, OnFinalize}}; use sp_core::H256; use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header, + traits::{BlakeTwo256, IdentityLookup}, testing::Header, }; use crate as recovery; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} - -impl_outer_event! { - pub enum TestEvent for Test { - system, - pallet_balances, - recovery, - } -} -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - pallet_balances::Balances, - recovery::Recovery, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Recovery: recovery::{Module, Call, Storage, Event}, } -} - -#[derive(Clone, Eq, PartialEq)] -pub struct Test; +); parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -69,32 +61,26 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u128; type DustRemoval = (); - type Event = TestEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -107,8 +93,8 @@ parameter_types! { pub const RecoveryDeposit: u64 = 10; } -impl Trait for Test { - type Event = TestEvent; +impl Config for Test { + type Event = Event; type Call = Call; type Currency = Balances; type ConfigDepositBase = ConfigDepositBase; @@ -117,10 +103,6 @@ impl Trait for Test { type RecoveryDeposit = RecoveryDeposit; } -pub type Recovery = Module; -pub type System = frame_system::Module; -pub type Balances = pallet_balances::Module; - pub type BalancesCall = pallet_balances::Call; pub type RecoveryCall = super::Call; diff --git a/frame/recovery/src/tests.rs b/frame/recovery/src/tests.rs index 8e9484f0fb089..4c7c6ef108d72 100644 --- a/frame/recovery/src/tests.rs +++ b/frame/recovery/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
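Recovery also moves from the old `inc_ref`/`dec_ref` calls to the consumer-reference API: taking a consumer reference is now fallible and the failure is surfaced as `Error::<T>::BadState`, while releasing it is infallible. A minimal sketch of that pairing, assuming frame-system 3.0.0 and a `Proxy` storage map like the one in this pallet; the helper names are illustrative only:

```rust
use frame_support::dispatch::DispatchResult;

/// Record that `who` may act for `account`, keeping `who`'s account alive
/// while the storage entry references it.
fn grant_recovered_access<T: Config>(who: &T::AccountId, account: &T::AccountId) -> DispatchResult {
    frame_system::Module::<T>::inc_consumers(who).map_err(|_| Error::<T>::BadState)?;
    Proxy::<T>::insert(who, account);
    Ok(())
}

/// Remove the mapping and release the consumer reference taken above.
fn revoke_recovered_access<T: Config>(who: &T::AccountId) {
    Proxy::<T>::remove(who);
    frame_system::Module::<T>::dec_consumers(who);
}
```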
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index 613762bb689e9..eef287d86771b 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-scheduler" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -11,18 +11,18 @@ readme = "README.md" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core", default-features = false } -substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } +sp-core = { version = "3.0.0", path = "../../primitives/core", default-features = false } +substrate-test-utils = { version = "3.0.0", path = "../../test-utils" } [features] default = ["std"] diff --git a/frame/scheduler/README.md b/frame/scheduler/README.md index 47beb71e3a0d1..3d07818b15d5e 100644 --- a/frame/scheduler/README.md +++ b/frame/scheduler/README.md @@ -12,7 +12,7 @@ specified block number or at a specified period. These scheduled dispatches may be named or anonymous and may be canceled. **NOTE:** The scheduled calls will be dispatched with the default filter -for the origin: namely `frame_system::Trait::BaseCallFilter` for all origin +for the origin: namely `frame_system::Config::BaseCallFilter` for all origin except root which will get no filter. And not the filter contained in origin use to call `fn schedule`. @@ -31,4 +31,4 @@ then those filter will not be used when dispatching the schedule call. `Vec` parameter that can be used for identification. * `cancel_named` - the named complement to the cancel function. -License: Unlicense \ No newline at end of file +License: Unlicense diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 753e9244628ad..defc334ba7368 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
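The scheduler (and proxy) benchmarks drop the shared `_ { ... }` block from `benchmarks!`; each benchmark now declares its component range inline, optionally with a `=> setup` expression, exactly as the `let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies(...)` lines do above. A minimal sketch of the new form, assuming frame-benchmarking 3.0.0; `do_something`, `prepare` and `Count` are hypothetical pallet items used only to show the shape:

```rust
benchmarks! {
    do_something {
        // Component range plus its setup, declared per benchmark instead of
        // in a common `_ { let n in ...; }` block.
        let n in 1 .. 100 => prepare::<T>(n)?;
        let caller: T::AccountId = whitelisted_caller();
    }: _(RawOrigin::Signed(caller), n)
    verify {
        assert!(Count::<T>::get() >= n);
    }
}
```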
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -31,7 +31,7 @@ use frame_system::Module as System; const BLOCK_NUMBER: u32 = 2; // Add `n` named items to the schedule -fn fill_schedule (when: T::BlockNumber, n: u32) -> Result<(), &'static str> { +fn fill_schedule (when: T::BlockNumber, n: u32) -> Result<(), &'static str> { // Essentially a no-op call. let call = frame_system::Call::set_storage(vec![]); for i in 0..n { @@ -52,8 +52,6 @@ fn fill_schedule (when: T::BlockNumber, n: u32) -> Result<(), &'static } benchmarks! { - _ { } - schedule { let s in 0 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index c467678a466de..a869fae27d8b3 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ //! # Scheduler //! A module for scheduling dispatches. //! -//! - [`scheduler::Trait`](./trait.Trait.html) +//! - [`scheduler::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -29,7 +29,7 @@ //! may be named or anonymous and may be canceled. //! //! **NOTE:** The scheduled calls will be dispatched with the default filter -//! for the origin: namely `frame_system::Trait::BaseCallFilter` for all origin +//! for the origin: namely `frame_system::Config::BaseCallFilter` for all origin //! except root which will get no filter. And not the filter contained in origin //! use to call `fn schedule`. //! @@ -70,27 +70,27 @@ pub use weights::WeightInfo; /// pallet is dependent on specific other pallets, then their configuration traits /// should be added to our implied traits list. /// -/// `system::Trait` should always be included in our implied traits. -pub trait Trait: system::Trait { +/// `system::Config` should always be included in our implied traits. +pub trait Config: system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The aggregated origin which the dispatch will take. type Origin: OriginTrait + From + IsType<::Origin>; + Self::PalletsOrigin> + From + IsType<::Origin>; /// The caller origin, overarching type of all pallets origins. type PalletsOrigin: From> + Codec + Clone + Eq; /// The aggregated call type. - type Call: Parameter + Dispatchable::Origin> + GetDispatchInfo + From>; + type Call: Parameter + Dispatchable::Origin> + GetDispatchInfo + From>; /// The maximum weight that may be scheduled per block for any dispatchables of less priority /// than `schedule::HARD_DEADLINE`. type MaximumWeight: Get; /// Required origin to schedule or cancel calls. - type ScheduleOrigin: EnsureOrigin<::Origin>; + type ScheduleOrigin: EnsureOrigin<::Origin>; /// The maximum number of scheduled calls in the queue for a single block. /// Not strictly enforced, but used for weight estimation. @@ -150,10 +150,10 @@ impl Default for Releases { } decl_storage! { - trait Store for Module as Scheduler { + trait Store for Module as Scheduler { /// Items to be executed, indexed by the block number that they should be executed on. 
pub Agenda: map hasher(twox_64_concat) T::BlockNumber - => Vec::Call, T::BlockNumber, T::PalletsOrigin, T::AccountId>>>; + => Vec::Call, T::BlockNumber, T::PalletsOrigin, T::AccountId>>>; /// Lookup from identity to the block number and index of the task. Lookup: map hasher(twox_64_concat) Vec => Option>; @@ -166,7 +166,7 @@ decl_storage! { } decl_event!( - pub enum Event where ::BlockNumber { + pub enum Event where ::BlockNumber { /// Scheduled some task. \[when, index\] Scheduled(BlockNumber, u32), /// Canceled some task. \[when, index\] @@ -177,7 +177,7 @@ decl_event!( ); decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Failed to schedule a call FailedToSchedule, /// Cannot find the scheduled call. @@ -191,7 +191,7 @@ decl_error! { decl_module! { /// Scheduler module declaration. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; fn deposit_event() = default; @@ -210,10 +210,10 @@ decl_module! { when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, + call: Box<::Call>, ) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule(DispatchTime::At(when), maybe_periodic, priority, origin.caller().clone(), *call)?; } @@ -230,7 +230,7 @@ decl_module! { #[weight = T::WeightInfo::cancel(T::MaxScheduledPerBlock::get())] fn cancel(origin, when: T::BlockNumber, index: u32) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_cancel(Some(origin.caller().clone()), (when, index))?; } @@ -250,10 +250,10 @@ decl_module! { when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, + call: Box<::Call>, ) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule_named( id, DispatchTime::At(when), maybe_periodic, priority, origin.caller().clone(), *call )?; @@ -272,7 +272,7 @@ decl_module! { #[weight = T::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get())] fn cancel_named(origin, id: Vec) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_cancel_named(Some(origin.caller().clone()), id)?; } @@ -286,10 +286,10 @@ decl_module! { after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, + call: Box<::Call>, ) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule( DispatchTime::After(after), maybe_periodic, priority, origin.caller().clone(), *call )?; @@ -306,10 +306,10 @@ decl_module! { after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, + call: Box<::Call>, ) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule_named( id, DispatchTime::After(after), maybe_periodic, priority, origin.caller().clone(), *call )?; @@ -347,7 +347,7 @@ decl_module! { *cumulative_weight = cumulative_weight .saturating_add(s.call.get_dispatch_info().weight); - let origin = <::Origin as From>::from( + let origin = <::Origin as From>::from( s.origin.clone() ).into(); @@ -415,7 +415,7 @@ decl_module! 
{ } } -impl Module { +impl Module { /// Migrate storage format from V1 to V2. /// Return true if migration is performed. pub fn migrate_v1_to_t2() -> bool { @@ -423,7 +423,7 @@ impl Module { StorageVersion::put(Releases::V2); Agenda::::translate::< - Vec::Call, T::BlockNumber>>>, _ + Vec::Call, T::BlockNumber>>>, _ >(|_, agenda| Some( agenda .into_iter() @@ -447,7 +447,7 @@ impl Module { /// Helper to migrate scheduler when the pallet origin type has changed. pub fn migrate_origin + codec::Decode>() { Agenda::::translate::< - Vec::Call, T::BlockNumber, OldOrigin, T::AccountId>>>, _ + Vec::Call, T::BlockNumber, OldOrigin, T::AccountId>>>, _ >(|_, agenda| Some( agenda .into_iter() @@ -485,7 +485,7 @@ impl Module { maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call + call: ::Call ) -> Result, DispatchError> { let when = Self::resolve_time(when)?; @@ -569,7 +569,7 @@ impl Module { maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call, + call: ::Call, ) -> Result, DispatchError> { // ensure id it is unique if Lookup::::contains_key(&id) { @@ -657,7 +657,7 @@ impl Module { } } -impl schedule::Anon::Call, T::PalletsOrigin> for Module { +impl schedule::Anon::Call, T::PalletsOrigin> for Module { type Address = TaskAddress; fn schedule( @@ -665,7 +665,7 @@ impl schedule::Anon::Call, T::PalletsOrig maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call + call: ::Call ) -> Result { Self::do_schedule(when, maybe_periodic, priority, origin, call) } @@ -686,7 +686,7 @@ impl schedule::Anon::Call, T::PalletsOrig } } -impl schedule::Named::Call, T::PalletsOrigin> for Module { +impl schedule::Named::Call, T::PalletsOrigin> for Module { type Address = TaskAddress; fn schedule_named( @@ -695,7 +695,7 @@ impl schedule::Named::Call, T::PalletsOri maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call, + call: ::Call, ) -> Result { Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call).map_err(|_| ()) } @@ -721,7 +721,7 @@ mod tests { use super::*; use frame_support::{ - impl_outer_event, impl_outer_origin, impl_outer_dispatch, parameter_types, assert_ok, ord_parameter_types, + parameter_types, assert_ok, ord_parameter_types, assert_noop, assert_err, Hashable, traits::{OnInitialize, OnFinalize, Filter}, weights::constants::RocksDbWeight, @@ -746,8 +746,8 @@ mod tests { pub fn log() -> Vec<(OriginCaller, u32)> { LOG.with(|log| log.borrow().clone()) } - pub trait Trait: system::Trait { - type Event: From + Into<::Event>; + pub trait Config: system::Config { + type Event: From + Into<::Event>; } decl_event! { pub enum Event { @@ -755,10 +755,10 @@ mod tests { } } decl_module! { - pub struct Module for enum Call + pub struct Module for enum Call where - origin: ::Origin, - ::Origin: OriginTrait + origin: ::Origin, + ::Origin: OriginTrait { fn deposit_event() = default; @@ -781,24 +781,20 @@ mod tests { } } - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - system::System, - logger::Logger, - } - } - - impl_outer_event! 
{ - pub enum Event for Test { - system, - logger, - scheduler, + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Logger: logger::{Module, Call, Event}, + Scheduler: scheduler::{Module, Call, Storage, Event}, } - } + ); // Scheduler must dispatch with root and no filter, this tests base filter is indeed not used. pub struct BaseFilter; @@ -808,16 +804,16 @@ mod tests { } } - #[derive(Clone, Eq, PartialEq)] - pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 2_000_000_000_000; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2_000_000_000_000); } - impl system::Trait for Test { + impl system::Config for Test { type BaseCallFilter = BaseFilter; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = RocksDbWeight; type Origin = Origin; type Call = Call; type Index = u64; @@ -827,35 +823,29 @@ mod tests { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } - impl logger::Trait for Test { - type Event = (); + impl logger::Config for Test { + type Event = Event; } parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; pub const MaxScheduledPerBlock: u32 = 10; } ord_parameter_types! { pub const One: u64 = 1; } - impl Trait for Test { - type Event = (); + impl Config for Test { + type Event = Event; type Origin = Origin; type PalletsOrigin = OriginCaller; type Call = Call; @@ -864,9 +854,6 @@ mod tests { type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = (); } - type System = system::Module; - type Logger = logger::Module; - type Scheduler = Module; pub fn new_test_ext() -> sp_io::TestExternalities { let t = system::GenesisConfig::default().build_storage::().unwrap(); @@ -889,7 +876,7 @@ mod tests { fn basic_scheduling_works() { new_test_ext().execute_with(|| { let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call)); run_to_block(3); assert!(logger::log().is_empty()); @@ -905,7 +892,7 @@ mod tests { new_test_ext().execute_with(|| { run_to_block(2); let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); // This will schedule the call 3 blocks after the next block... 
so block 3 + 3 = 6 assert_ok!(Scheduler::do_schedule(DispatchTime::After(3), None, 127, root(), call)); run_to_block(5); @@ -922,7 +909,7 @@ mod tests { new_test_ext().execute_with(|| { run_to_block(2); let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); assert_ok!(Scheduler::do_schedule(DispatchTime::After(0), None, 127, root(), call)); // Will trigger on the next block. run_to_block(3); @@ -960,7 +947,7 @@ mod tests { fn reschedule_works() { new_test_ext().execute_with(|| { let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); assert_eq!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call).unwrap(), (4, 0)); run_to_block(3); @@ -985,7 +972,7 @@ mod tests { fn reschedule_named_works() { new_test_ext().execute_with(|| { let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); assert_eq!(Scheduler::do_schedule_named( 1u32.encode(), DispatchTime::At(4), None, 127, root(), call ).unwrap(), (4, 0)); @@ -1012,7 +999,7 @@ mod tests { fn reschedule_named_perodic_works() { new_test_ext().execute_with(|| { let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); assert_eq!(Scheduler::do_schedule_named( 1u32.encode(), DispatchTime::At(4), Some((3, 3)), 127, root(), call ).unwrap(), (4, 0)); @@ -1203,10 +1190,10 @@ mod tests { #[test] fn on_initialize_weight_is_correct() { new_test_ext().execute_with(|| { - let base_weight: Weight = ::DbWeight::get().reads_writes(1, 2); + let base_weight: Weight = ::DbWeight::get().reads_writes(1, 2); let base_multiplier = 0; - let named_multiplier = ::DbWeight::get().writes(1); - let periodic_multiplier = ::DbWeight::get().reads_writes(1, 1); + let named_multiplier = ::DbWeight::get().writes(1); + let periodic_multiplier = ::DbWeight::get().reads_writes(1, 1); // Named assert_ok!( diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index 3699e6f85b234..0508930f4ef20 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -52,7 +52,7 @@ pub trait WeightInfo { /// Weights for pallet_scheduler using the Substrate node and recommended hardware. 
 pub struct SubstrateWeight<T>(PhantomData<T>);
-impl<T: frame_system::Trait> WeightInfo for SubstrateWeight<T> {
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	fn schedule(s: u32, ) -> Weight {
 		(35_029_000 as Weight)
 			.saturating_add((77_000 as Weight).saturating_mul(s as Weight))
diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml
index b36bade8e925a..e5e71dba68881 100644
--- a/frame/scored-pool/Cargo.toml
+++ b/frame/scored-pool/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-scored-pool"
-version = "2.0.0"
+version = "3.0.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -13,17 +13,17 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
 serde = { version = "1.0.101", optional = true }
-sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" }
-sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" }
-frame-support = { version = "2.0.0", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0", default-features = false, path = "../system" }
+sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" }
+sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" }
+sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" }
+frame-support = { version = "3.0.0", default-features = false, path = "../support" }
+frame-system = { version = "3.0.0", default-features = false, path = "../system" }
 
 [dev-dependencies]
-pallet-balances = { version = "2.0.0", path = "../balances" }
-sp-core = { version = "2.0.0", path = "../../primitives/core" }
+pallet-balances = { version = "3.0.0", path = "../balances" }
+sp-core = { version = "3.0.0", path = "../../primitives/core" }
 
 [features]
 default = ["std"]
diff --git a/frame/scored-pool/README.md b/frame/scored-pool/README.md
index 948d5b497721b..8f7198a5e11de 100644
--- a/frame/scored-pool/README.md
+++ b/frame/scored-pool/README.md
@@ -41,10 +41,10 @@ use frame_support::{decl_module, dispatch};
 use frame_system::ensure_signed;
 use pallet_scored_pool::{self as scored_pool};
 
-pub trait Trait: scored_pool::Trait {}
+pub trait Config: scored_pool::Config {}
 
 decl_module! {
-	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+	pub struct Module<T: Config> for enum Call where origin: T::Origin {
 		#[weight = 0]
 		pub fn candidate(origin) -> dispatch::DispatchResult {
 			let who = ensure_signed(origin)?;
@@ -63,4 +63,4 @@ decl_module! {
 
 This module depends on the [System module](https://docs.rs/frame-system/latest/frame_system/).
 
-License: Apache-2.0
\ No newline at end of file
+License: Apache-2.0
diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs
index 90d4aca4e42a4..ce2279b150050 100644
--- a/frame/scored-pool/src/lib.rs
+++ b/frame/scored-pool/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -37,7 +37,7 @@
 //! from the `Pool` and `Members`; the entity is immediately replaced
 //!
by the next highest scoring candidate in the pool, if available. //! -//! - [`scored_pool::Trait`](./trait.Trait.html) +//! - [`scored_pool::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -58,10 +58,10 @@ //! use frame_system::ensure_signed; //! use pallet_scored_pool::{self as scored_pool}; //! -//! pub trait Trait: scored_pool::Trait {} +//! pub trait Config: scored_pool::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn candidate(origin) -> dispatch::DispatchResult { //! let who = ensure_signed(origin)?; @@ -103,8 +103,8 @@ use frame_support::{ use frame_system::{ensure_root, ensure_signed}; use sp_runtime::traits::{AtLeast32Bit, MaybeSerializeDeserialize, Zero, StaticLookup}; -type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; -type PoolT = Vec<(::AccountId, Option<>::Score>)>; +type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; +type PoolT = Vec<(::AccountId, Option<>::Score>)>; /// The enum is supplied when refreshing the members set. /// Depending on the enum variant the corresponding associated @@ -116,7 +116,7 @@ enum ChangeReceiver { MembershipChanged, } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The currency used for deposits. type Currency: Currency + ReservableCurrency; @@ -125,7 +125,7 @@ pub trait Trait: frame_system::Trait { AtLeast32Bit + Clone + Copy + Default + FullCodec + MaybeSerializeDeserialize + Debug; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; // The deposit which is reserved from candidates if they want to // start a candidacy. The deposit gets returned when the candidacy is @@ -156,7 +156,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as ScoredPool { + trait Store for Module, I: Instance=DefaultInstance> as ScoredPool { /// The current pool of candidates, stored as an ordered Vec /// (ordered descending by score, `None` last, highest first). Pool get(fn pool) config(): PoolT; @@ -204,7 +204,7 @@ decl_storage! { decl_event!( pub enum Event where - ::AccountId, + ::AccountId, { /// The given member was removed. See the transaction for who. MemberRemoved, @@ -225,7 +225,7 @@ decl_event!( decl_error! { /// Error for the scored-pool module. - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// Already a member. AlreadyInPool, /// Index out of bounds. @@ -236,7 +236,7 @@ decl_error! { } decl_module! { - pub struct Module, I: Instance=DefaultInstance> + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { @@ -275,7 +275,7 @@ decl_module! { // can be inserted as last element in pool, since entities with // `None` are always sorted to the end. - >::append((who.clone(), Option::<>::Score>::None)); + >::append((who.clone(), Option::<>::Score>::None)); >::insert(&who, true); @@ -382,7 +382,7 @@ decl_module! { } } -impl, I: Instance> Module { +impl, I: Instance> Module { /// Fetches the `MemberCount` highest scoring members from /// `Pool` and puts them into `Members`. diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 59c0dc66cca60..3c4263b813e41 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
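The scored-pool changes above follow the same pattern as the scheduler: the pallet configuration trait is renamed from `Trait` to `Config`, along with every `T: Trait` bound and the `frame_system::Trait` supertrait. As a rough, illustrative sketch (not taken from this changeset), a minimal pallet written in the same `decl_*` macro style looks like this after the rename; associated types, storage items and events keep their names.

```rust
use frame_support::{decl_event, decl_module};

// Previously: `pub trait Trait: frame_system::Trait { .. }`
pub trait Config: frame_system::Config {
	/// The overarching event type.
	type Event: From<Event<Self>> + Into<<Self as frame_system::Config>::Event>;
}

decl_event!(
	pub enum Event<T> where AccountId = <T as frame_system::Config>::AccountId {
		/// An example event carrying an account id.
		Noted(AccountId),
	}
);

decl_module! {
	// Previously: `pub struct Module<T: Trait> for enum Call where origin: T::Origin`
	pub struct Module<T: Config> for enum Call where origin: T::Origin {
		fn deposit_event() = default;
	}
}
```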
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,69 +18,73 @@ //! Test utilities use super::*; +use crate as pallet_scored_pool; use std::cell::RefCell; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight, ord_parameter_types}; +use frame_support::{parameter_types, ord_parameter_types}; use sp_core::H256; use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header, + traits::{BlakeTwo256, IdentityLookup}, testing::Header, }; use frame_system::EnsureSignedBy; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + ScoredPool: pallet_scored_pool::{Module, Call, Storage, Config, Event}, + } +); -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! { pub const CandidateDeposit: u64 = 25; pub const Period: u64 = 4; - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const ExistentialDeposit: u64 = 1; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } ord_parameter_types! { pub const KickOrigin: u64 = 2; pub const ScoreOrigin: u64 = 3; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; - type Event = (); + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -114,8 +118,8 @@ impl InitializeMembers for TestChangeMembers { } } -impl Trait for Test { - type Event = (); +impl Config for Test { + type Event = Event; type KickOrigin = EnsureSignedBy; type MembershipInitialized = TestChangeMembers; type MembershipChanged = TestChangeMembers; @@ -126,9 +130,6 @@ impl Trait for Test { type ScoreOrigin = EnsureSignedBy; } -type System = frame_system::Module; -type Balances = pallet_balances::Module; - pub fn new_test_ext() -> 
sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { @@ -142,7 +143,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { (99, 1), ], }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::{ + pallet_scored_pool::GenesisConfig::{ pool: vec![ (5, None), (10, Some(1)), diff --git a/frame/scored-pool/src/tests.rs b/frame/scored-pool/src/tests.rs index 44b71bc00ba47..8f33f30f6ed8e 100644 --- a/frame/scored-pool/src/tests.rs +++ b/frame/scored-pool/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index ea3a3d3cdf7fa..5b8fe6e2d137e 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-session" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,21 +14,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } -sp-trie = { version = "2.0.0", optional = true, default-features = false, path = "../../primitives/trie" } -impl-trait-for-tuples = "0.1.3" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-session = { version = "3.0.0", default-features = false, path = "../../primitives/session" } +sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +pallet-timestamp = { version = "3.0.0", default-features = false, path = "../timestamp" } +sp-trie = { version = "3.0.0", optional = true, default-features = false, path = "../../primitives/trie" } +impl-trait-for-tuples = "0.2.1" [dev-dependencies] -sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } 
+sp-application-crypto = { version = "3.0.0", path = "../../primitives/application-crypto" } lazy_static = "1.4.0" [features] diff --git a/frame/session/README.md b/frame/session/README.md index 60da8958f73d0..e1f8b7f8e0238 100644 --- a/frame/session/README.md +++ b/frame/session/README.md @@ -71,7 +71,7 @@ The [Staking pallet](https://docs.rs/pallet-staking/latest/pallet_staking/) uses ```rust use pallet_session as session; -fn validators() -> Vec<::ValidatorId> { +fn validators() -> Vec<::ValidatorId> { >::validators() } ``` @@ -80,4 +80,4 @@ fn validators() -> Vec<::V - [Staking](https://docs.rs/pallet-staking/latest/pallet_staking/) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index dea05934cd872..bf5a9a9617b1f 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-session-benchmarking" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,24 +13,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -frame-system = { version = "2.0.0", default-features = false, path = "../../system" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../benchmarking" } -frame-support = { version = "2.0.0", default-features = false, path = "../../support" } -pallet-staking = { version = "2.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } -pallet-session = { version = "2.0.0", default-features = false, path = "../../session" } +sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } +sp-session = { version = "3.0.0", default-features = false, path = "../../../primitives/session" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } +frame-system = { version = "3.0.0", default-features = false, path = "../../system" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../../benchmarking" } +frame-support = { version = "3.0.0", default-features = false, path = "../../support" } +pallet-staking = { version = "3.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } +pallet-session = { version = "3.0.0", default-features = false, path = "../../session" } rand = { version = "0.7.2", default-features = false } [dev-dependencies] serde = { version = "1.0.101" } -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -pallet-staking-reward-curve = { version = "2.0.0", path = "../../staking/reward-curve" } -sp-io ={ version = "2.0.0", path = "../../../primitives/io" } -pallet-timestamp = { version = "2.0.0", path = "../../timestamp" } -pallet-balances = { version = "2.0.0", path = "../../balances" } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +pallet-staking-reward-curve = { version = "3.0.0", path = 
"../../staking/reward-curve" } +sp-io ={ version = "3.0.0", path = "../../../primitives/io" } +pallet-timestamp = { version = "3.0.0", path = "../../timestamp" } +pallet-balances = { version = "3.0.0", path = "../../balances" } [features] default = ["std"] diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 277200b269569..06dfa3da34943 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,7 +28,7 @@ use sp_std::vec; use frame_benchmarking::benchmarks; use frame_support::{ codec::Decode, - storage::{StorageValue, StorageMap}, + storage::StorageValue, traits::{KeyOwnerProofSystem, OnInitialize}, }; use frame_system::RawOrigin; @@ -41,18 +41,16 @@ use sp_runtime::traits::{One, StaticLookup}; const MAX_VALIDATORS: u32 = 1000; -pub struct Module(pallet_session::Module); -pub trait Trait: pallet_session::Trait + pallet_session::historical::Trait + pallet_staking::Trait {} +pub struct Module(pallet_session::Module); +pub trait Config: pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config {} -impl OnInitialize for Module { +impl OnInitialize for Module { fn on_initialize(n: T::BlockNumber) -> frame_support::weights::Weight { pallet_session::Module::::on_initialize(n) } } benchmarks! { - _ { } - set_keys { let n = MAX_NOMINATIONS as u32; let (v_stash, _) = create_validator_with_nominators::( @@ -121,7 +119,7 @@ benchmarks! { /// Sets up the benchmark for checking a membership proof. It creates the given /// number of validators, sets random session keys and then creates a membership /// proof for the first authority and returns its key and the proof. -fn check_membership_proof_setup( +fn check_membership_proof_setup( n: u32, ) -> ( (sp_runtime::KeyTypeId, &'static [u8; 32]), diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 6a9cfc5f98a1b..b25b169c82edb 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,33 +20,34 @@ #![cfg(test)] use sp_runtime::traits::IdentityLookup; -use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types}; +use frame_support::parameter_types; type AccountId = u64; type AccountIndex = u32; type BlockNumber = u64; type Balance = u64; -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Staking = pallet_staking::Module; -type Session = pallet_session::Module; - -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} - -impl_outer_dispatch! 
{ - pub enum Call for Test where origin: Origin { - pallet_staking::Staking, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, } -} +); -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct Test; - -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -56,29 +57,23 @@ impl frame_system::Trait for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = (); + type Event = Event; type BlockHashCount = (); - type MaximumBlockWeight = (); - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnKilledAccount = Balances; + type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: Balance = 10; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = Balance; - type Event = (); + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -88,13 +83,13 @@ impl pallet_balances::Trait for Test { parameter_types! 
{ pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -120,13 +115,13 @@ impl pallet_session::SessionHandler for TestSessionHandler { fn on_disabled(_: usize) {} } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions<(), ()>; type NextSessionRotation = pallet_session::PeriodicSessions<(), ()>; type SessionHandler = TestSessionHandler; - type Event = (); + type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; type DisabledValidatorsThreshold = (); @@ -157,12 +152,12 @@ impl frame_system::offchain::SendTransactionTypes for Test where type Extrinsic = Extrinsic; } -impl pallet_staking::Trait for Test { +impl pallet_staking::Config for Test { type Currency = Balances; type UnixTime = pallet_timestamp::Module; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type Event = (); + type Event = Event; type Slash = (); type Reward = (); type SessionsPerEra = (); @@ -182,7 +177,7 @@ impl pallet_staking::Trait for Test { type WeightInfo = (); } -impl crate::Trait for Test {} +impl crate::Config for Test {} pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 20c3d57464c89..9b4d2704cf456 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -31,8 +31,10 @@ use codec::{Encode, Decode}; use sp_runtime::KeyTypeId; use sp_runtime::traits::{Convert, OpaqueKeys}; use sp_session::{MembershipProof, ValidatorCount}; -use frame_support::{decl_module, decl_storage}; -use frame_support::{Parameter, print}; +use frame_support::{ + decl_module, decl_storage, Parameter, print, + traits::{ValidatorSet, ValidatorSetWithIdentification}, +}; use sp_trie::{MemoryDB, Trie, TrieMut, Recorder, EMPTY_PREFIX}; use sp_trie::trie_types::{TrieDBMut, TrieDB}; use super::{SessionIndex, Module as SessionModule}; @@ -41,8 +43,8 @@ mod shared; pub mod offchain; pub mod onchain; -/// Trait necessary for the historical module. -pub trait Trait: super::Trait { +/// Config necessary for the historical module. +pub trait Config: super::Config { /// Full identification of the validator. type FullIdentification: Parameter; @@ -57,7 +59,7 @@ pub trait Trait: super::Trait { } decl_storage! { - trait Store for Module as Session { + trait Store for Module as Session { /// Mapping from historical session indices to session-data root hash and validator count. HistoricalSessions get(fn historical_root): map hasher(twox_64_concat) SessionIndex => Option<(T::Hash, ValidatorCount)>; @@ -71,10 +73,10 @@ decl_storage! { } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin {} } -impl Module { +impl Module { /// Prune historical stored session roots up to (but not including) /// `up_to`. pub fn prune_up_to(up_to: SessionIndex) { @@ -102,6 +104,24 @@ impl Module { } } +impl ValidatorSet for Module { + type ValidatorId = T::ValidatorId; + type ValidatorIdOf = T::ValidatorIdOf; + + fn session_index() -> sp_staking::SessionIndex { + super::Module::::current_index() + } + + fn validators() -> Vec { + super::Module::::validators() + } +} + +impl ValidatorSetWithIdentification for Module { + type Identification = T::FullIdentification; + type IdentificationOf = T::FullIdentificationOf; +} + /// Specialization of the crate-level `SessionManager` which returns the set of full identification /// when creating a new session. pub trait SessionManager: crate::SessionManager { @@ -116,7 +136,7 @@ pub trait SessionManager: crate::SessionManager /// sets the historical trie root of the ending session. pub struct NoteHistoricalRoot(sp_std::marker::PhantomData<(T, I)>); -impl crate::SessionManager for NoteHistoricalRoot +impl crate::SessionManager for NoteHistoricalRoot where I: SessionManager { fn new_session(new_index: SessionIndex) -> Option> { @@ -160,15 +180,15 @@ impl crate::SessionManager for NoteHistoricalRoot = (::ValidatorId, ::FullIdentification); +pub type IdentificationTuple = (::ValidatorId, ::FullIdentification); /// A trie instance for checking and generating proofs. -pub struct ProvingTrie { +pub struct ProvingTrie { db: MemoryDB, root: T::Hash, } -impl ProvingTrie { +impl ProvingTrie { fn generate_for(validators: I) -> Result where I: IntoIterator { @@ -260,7 +280,7 @@ impl ProvingTrie { } } -impl> frame_support::traits::KeyOwnerProofSystem<(KeyTypeId, D)> +impl> frame_support::traits::KeyOwnerProofSystem<(KeyTypeId, D)> for Module { type Proof = MembershipProof; @@ -327,16 +347,21 @@ pub(crate) mod tests { set_next_validators, Test, System, Session, }; use frame_support::traits::{KeyOwnerProofSystem, OnInitialize}; + use frame_support::BasicExternalities; type Historical = Module; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - crate::GenesisConfig:: { - keys: NEXT_VALIDATORS.with(|l| - l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - ), - }.assimilate_storage(&mut t).unwrap(); + let keys: Vec<_> = NEXT_VALIDATORS.with(|l| + l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() + ); + BasicExternalities::execute_with_storage(&mut t, || { + for (ref k, ..) in &keys { + frame_system::Module::::inc_providers(k); + } + }); + crate::GenesisConfig:: { keys }.assimilate_storage(&mut t).unwrap(); sp_io::TestExternalities::new(t) } diff --git a/frame/session/src/historical/offchain.rs b/frame/session/src/historical/offchain.rs index 97655d1a18b32..38cf09124ccf6 100644 --- a/frame/session/src/historical/offchain.rs +++ b/frame/session/src/historical/offchain.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
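Besides the `Config` rename, the historical module above now implements the new `frame_support::traits::ValidatorSet` and `ValidatorSetWithIdentification` traits. A minimal consumer sketch follows; the helper is hypothetical and assumes the trait exposes an associated `ValidatorId` plus `validators()`/`session_index()` getters, as the impls above suggest.

```rust
use frame_support::traits::ValidatorSet;
use sp_std::vec::Vec;

/// Read the current validator list from any validator-set provider, e.g.
/// `pallet_session::Module<Runtime>` or the historical session module.
pub fn current_validators<AccountId, V>() -> Vec<V::ValidatorId>
where
	V: ValidatorSet<AccountId>,
{
	V::validators()
}
```

This lets pallets that only need to enumerate the active validators stay generic over the provider instead of depending on pallet-session directly.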
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,18 +29,18 @@ use sp_runtime::{offchain::storage::StorageValueRef, KeyTypeId}; use sp_session::MembershipProof; use super::super::{Module as SessionModule, SessionIndex}; -use super::{IdentificationTuple, ProvingTrie, Trait}; +use super::{IdentificationTuple, ProvingTrie, Config}; use super::shared; use sp_std::prelude::*; /// A set of validators, which was used for a fixed session index. -struct ValidatorSet { +struct ValidatorSet { validator_set: Vec>, } -impl ValidatorSet { +impl ValidatorSet { /// Load the set of validators for a particular session index from the off-chain storage. /// /// If none is found or decodable given `prefix` and `session`, it will return `None`. @@ -61,7 +61,7 @@ impl ValidatorSet { /// Implement conversion into iterator for usage /// with [ProvingTrie](super::ProvingTrie::generate_for). -impl sp_std::iter::IntoIterator for ValidatorSet { +impl sp_std::iter::IntoIterator for ValidatorSet { type Item = (T::ValidatorId, T::FullIdentification); type IntoIter = sp_std::vec::IntoIter; fn into_iter(self) -> Self::IntoIter { @@ -74,7 +74,7 @@ impl sp_std::iter::IntoIterator for ValidatorSet { /// Based on the yielded `MembershipProof` the implementer may decide what /// to do, i.e. in case of a failed proof, enqueue a transaction back on /// chain reflecting that, with all its consequences such as i.e. slashing. -pub fn prove_session_membership>( +pub fn prove_session_membership>( session_index: SessionIndex, session_key: (KeyTypeId, D), ) -> Option { @@ -97,7 +97,7 @@ pub fn prove_session_membership>( /// Due to re-organisation it could be that the `first_to_keep` might be less /// than the stored one, in which case the conservative choice is made to keep records /// up to the one that is the lesser. -pub fn prune_older_than(first_to_keep: SessionIndex) { +pub fn prune_older_than(first_to_keep: SessionIndex) { let derived_key = shared::LAST_PRUNE.to_vec(); let entry = StorageValueRef::persistent(derived_key.as_ref()); match entry.mutate(|current: Option>| -> Result<_, ()> { @@ -127,7 +127,7 @@ pub fn prune_older_than(first_to_keep: SessionIndex) { } /// Keep the newest `n` items, and prune all items older than that. -pub fn keep_newest(n_to_keep: usize) { +pub fn keep_newest(n_to_keep: usize) { let session_index = >::current_index(); let n_to_keep = n_to_keep as SessionIndex; if n_to_keep < session_index { @@ -152,28 +152,27 @@ mod tests { }; use sp_runtime::testing::UintAuthorityId; + use frame_support::BasicExternalities; type Historical = Module; pub fn new_test_ext() -> sp_io::TestExternalities { - let mut ext = frame_system::GenesisConfig::default() + let mut t = frame_system::GenesisConfig::default() .build_storage::() .expect("Failed to create test externalities."); - crate::GenesisConfig:: { - keys: NEXT_VALIDATORS.with(|l| { - l.borrow() - .iter() - .cloned() - .map(|i| (i, i, UintAuthorityId(i).into())) - .collect() - }), - } - .assimilate_storage(&mut ext) - .unwrap(); + let keys: Vec<_> = NEXT_VALIDATORS.with(|l| + l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() + ); + BasicExternalities::execute_with_storage(&mut t, || { + for (ref k, ..) 
in &keys { + frame_system::Module::::inc_providers(k); + } + }); + crate::GenesisConfig::{ keys }.assimilate_storage(&mut t).unwrap(); - let mut ext = sp_io::TestExternalities::new(ext); + let mut ext = sp_io::TestExternalities::new(t); let (offchain, offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db()); @@ -189,12 +188,12 @@ mod tests { #[test] fn encode_decode_roundtrip() { use codec::{Decode, Encode}; - use super::super::super::Trait as SessionTrait; - use super::super::Trait as HistoricalTrait; + use super::super::super::Config as SessionConfig; + use super::super::Config as HistoricalConfig; let sample = ( - 22u32 as ::ValidatorId, - 7_777_777 as ::FullIdentification); + 22u32 as ::ValidatorId, + 7_777_777 as ::FullIdentification); let encoded = sample.encode(); let decoded = Decode::decode(&mut encoded.as_slice()).expect("Must decode"); diff --git a/frame/session/src/historical/onchain.rs b/frame/session/src/historical/onchain.rs index 745603a49829b..3b933bf262a00 100644 --- a/frame/session/src/historical/onchain.rs +++ b/frame/session/src/historical/onchain.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,9 +20,9 @@ use codec::Encode; use sp_runtime::traits::Convert; -use super::super::Trait as SessionTrait; +use super::super::Config as SessionConfig; use super::super::{Module as SessionModule, SessionIndex}; -use super::Trait as HistoricalTrait; +use super::Config as HistoricalConfig; use super::shared; use sp_std::prelude::*; @@ -35,14 +35,14 @@ use sp_std::prelude::*; /// `on_initialize(..)` or `on_finalization(..)`. /// **Must** be called during the session, which validator-set is to be stored for further /// off-chain processing. Otherwise the `FullIdentification` might not be available. -pub fn store_session_validator_set_to_offchain( +pub fn store_session_validator_set_to_offchain( session_index: SessionIndex, ) { let encoded_validator_list = >::validators() .into_iter() - .filter_map(|validator_id: ::ValidatorId| { + .filter_map(|validator_id: ::ValidatorId| { let full_identification = - <::FullIdentificationOf>::convert(validator_id.clone()); + <::FullIdentificationOf>::convert(validator_id.clone()); full_identification.map(|full_identification| (validator_id, full_identification)) }) .collect::>(); @@ -55,8 +55,8 @@ pub fn store_session_validator_set_to_offchain() { +pub fn store_current_session_validator_set_to_offchain() { store_session_validator_set_to_offchain::(>::current_index()); } diff --git a/frame/session/src/historical/shared.rs b/frame/session/src/historical/shared.rs index fda0361b05959..b054854d88fe8 100644 --- a/frame/session/src/historical/shared.rs +++ b/frame/session/src/historical/shared.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index c0a8fc29165bd..64ec31ad99d07 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
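The test-externalities changes above all follow one pattern: because registering session keys now takes a consumer reference on the owning account, each account first needs a provider reference. A sketch of the pattern, assuming the pallet's mock `Test` runtime and the `UintAuthorityId`-based mock keys shown elsewhere in this changeset:

```rust
use frame_support::BasicExternalities;
use sp_runtime::testing::UintAuthorityId;

pub fn new_test_ext() -> sp_io::TestExternalities {
	let mut t = frame_system::GenesisConfig::default()
		.build_storage::<Test>()
		.unwrap();

	// One validator (account 1) whose session keys are registered at genesis.
	let keys: Vec<_> = vec![(1u64, 1u64, UintAuthorityId(1).into())];

	BasicExternalities::execute_with_storage(&mut t, || {
		for (ref k, ..) in &keys {
			// Give every key owner a provider reference so that the consumer
			// reference taken when its keys are registered cannot fail.
			frame_system::Module::<Test>::inc_providers(k);
		}
	});

	pallet_session::GenesisConfig::<Test> { keys }
		.assimilate_storage(&mut t)
		.unwrap();

	sp_io::TestExternalities::new(t)
}
```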
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,10 @@ //! # Session Module //! -//! The Session module allows validators to manage their session keys, provides a function for changing -//! the session length, and handles session rotation. +//! The Session module allows validators to manage their session keys, provides a function for +//! changing the session length, and handles session rotation. //! -//! - [`session::Trait`](./trait.Trait.html) +//! - [`session::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -29,34 +29,39 @@ //! ### Terminology //! //! -//! - **Session:** A session is a period of time that has a constant set of validators. Validators can only join -//! or exit the validator set at a session change. It is measured in block numbers. The block where a session is -//! ended is determined by the `ShouldEndSession` trait. When the session is ending, a new validator set -//! can be chosen by `OnSessionEnding` implementations. -//! - **Session key:** A session key is actually several keys kept together that provide the various signing -//! functions required by network authorities/validators in pursuit of their duties. -//! - **Validator ID:** Every account has an associated validator ID. For some simple staking systems, this -//! may just be the same as the account ID. For staking systems using a stash/controller model, -//! the validator ID would be the stash account ID of the controller. +//! - **Session:** A session is a period of time that has a constant set of validators. Validators +//! can only join or exit the validator set at a session change. It is measured in block numbers. +//! The block where a session is ended is determined by the `ShouldEndSession` trait. When the +//! session is ending, a new validator set can be chosen by `OnSessionEnding` implementations. +//! +//! - **Session key:** A session key is actually several keys kept together that provide the various +//! signing functions required by network authorities/validators in pursuit of their duties. +//! - **Validator ID:** Every account has an associated validator ID. For some simple staking +//! systems, this may just be the same as the account ID. For staking systems using a +//! stash/controller model, the validator ID would be the stash account ID of the controller. +//! //! - **Session key configuration process:** Session keys are set using `set_keys` for use not in -//! the next session, but the session after next. They are stored in `NextKeys`, a mapping between -//! the caller's `ValidatorId` and the session keys provided. `set_keys` allows users to set their -//! session key prior to being selected as validator. -//! It is a public call since it uses `ensure_signed`, which checks that the origin is a signed account. -//! As such, the account ID of the origin stored in `NextKeys` may not necessarily be associated with -//! a block author or a validator. The session keys of accounts are removed once their account balance is zero. +//! the next session, but the session after next. They are stored in `NextKeys`, a mapping between +//! the caller's `ValidatorId` and the session keys provided. `set_keys` allows users to set their +//! session key prior to being selected as validator. It is a public call since it uses +//! `ensure_signed`, which checks that the origin is a signed account. As such, the account ID of +//! 
the origin stored in `NextKeys` may not necessarily be associated with a block author or a +//! validator. The session keys of accounts are removed once their account balance is zero. +//! //! - **Session length:** This pallet does not assume anything about the length of each session. -//! Rather, it relies on an implementation of `ShouldEndSession` to dictate a new session's start. -//! This pallet provides the `PeriodicSessions` struct for simple periodic sessions. -//! - **Session rotation configuration:** Configure as either a 'normal' (rewardable session where rewards are -//! applied) or 'exceptional' (slashable) session rotation. +//! Rather, it relies on an implementation of `ShouldEndSession` to dictate a new session's start. +//! This pallet provides the `PeriodicSessions` struct for simple periodic sessions. +//! +//! - **Session rotation configuration:** Configure as either a 'normal' (rewardable session where +//! rewards are applied) or 'exceptional' (slashable) session rotation. +//! //! - **Session rotation process:** At the beginning of each block, the `on_initialize` function -//! queries the provided implementation of `ShouldEndSession`. If the session is to end the newly -//! activated validator IDs and session keys are taken from storage and passed to the -//! `SessionHandler`. The validator set supplied by `SessionManager::new_session` and the corresponding session -//! keys, which may have been registered via `set_keys` during the previous session, are written -//! to storage where they will wait one session before being passed to the `SessionHandler` -//! themselves. +//! queries the provided implementation of `ShouldEndSession`. If the session is to end the newly +//! activated validator IDs and session keys are taken from storage and passed to the +//! `SessionHandler`. The validator set supplied by `SessionManager::new_session` and the +//! corresponding session keys, which may have been registered via `set_keys` during the previous +//! session, are written to storage where they will wait one session before being passed to the +//! `SessionHandler` themselves. //! //! ### Goals //! @@ -75,7 +80,7 @@ //! ### Public Functions //! //! - `rotate_session` - Change to the next session. Register the new authority set. Queue changes -//! for next session rotation. +//! for next session rotation. //! - `disable_index` - Disable a validator by index. //! - `disable` - Disable a validator by Validator ID //! @@ -83,13 +88,14 @@ //! //! ### Example from the FRAME //! -//! The [Staking pallet](../pallet_staking/index.html) uses the Session pallet to get the validator set. +//! The [Staking pallet](../pallet_staking/index.html) uses the Session pallet to get the validator +//! set. //! //! ``` //! use pallet_session as session; //! -//! fn validators() -> Vec<::ValidatorId> { -//! >::validators() +//! fn validators() -> Vec<::ValidatorId> { +//! >::validators() //! } //! # fn main(){} //! 
``` @@ -110,13 +116,14 @@ pub mod weights; use sp_std::{prelude::*, marker::PhantomData, ops::{Sub, Rem}}; use codec::Decode; -use sp_runtime::{KeyTypeId, Perbill, RuntimeAppPublic, BoundToRuntimeAppPublic}; +use sp_runtime::{KeyTypeId, Perbill, RuntimeAppPublic}; use sp_runtime::traits::{Convert, Zero, Member, OpaqueKeys, Saturating}; use sp_staking::SessionIndex; use frame_support::{ ensure, decl_module, decl_event, decl_storage, decl_error, ConsensusEngineId, Parameter, traits::{ Get, FindAuthor, ValidatorRegistration, EstimateNextSessionRotation, EstimateNextNewSession, + OneSessionHandler, ValidatorSet, }, dispatch::{self, DispatchResult, DispatchError}, weights::Weight, @@ -166,7 +173,7 @@ impl< period.saturating_sub(block_after_last_session) ) } else { - Zero::zero() + now } } else { offset @@ -174,10 +181,10 @@ impl< } fn weight(_now: BlockNumber) -> Weight { - // Weight note: `estimate_next_session_rotation` has no storage reads and trivial computational overhead. - // There should be no risk to the chain having this weight value be zero for now. - // However, this value of zero was not properly calculated, and so it would be reasonable - // to come back here and properly calculate the weight of this function. + // Weight note: `estimate_next_session_rotation` has no storage reads and trivial + // computational overhead. There should be no risk to the chain having this weight value be + // zero for now. However, this value of zero was not properly calculated, and so it would be + // reasonable to come back here and properly calculate the weight of this function. 0 } } @@ -186,17 +193,17 @@ impl< pub trait SessionManager { /// Plan a new session, and optionally provide the new validator set. /// - /// Even if the validator-set is the same as before, if any underlying economic - /// conditions have changed (i.e. stake-weights), the new validator set must be returned. - /// This is necessary for consensus engines making use of the session module to - /// issue a validator-set change so misbehavior can be provably associated with the new - /// economic conditions as opposed to the old. - /// The returned validator set, if any, will not be applied until `new_index`. - /// `new_index` is strictly greater than from previous call. + /// Even if the validator-set is the same as before, if any underlying economic conditions have + /// changed (i.e. stake-weights), the new validator set must be returned. This is necessary for + /// consensus engines making use of the session module to issue a validator-set change so + /// misbehavior can be provably associated with the new economic conditions as opposed to the + /// old. The returned validator set, if any, will not be applied until `new_index`. `new_index` + /// is strictly greater than from previous call. /// /// The first session start at index 0. /// - /// `new_session(session)` is guaranteed to be called before `end_session(session-1)`. + /// `new_session(session)` is guaranteed to be called before `end_session(session-1)`. In other + /// words, a new session must always be planned before an ongoing one can be finished. fn new_session(new_index: SessionIndex) -> Option>; /// End the session. /// @@ -205,7 +212,7 @@ pub trait SessionManager { fn end_session(end_index: SessionIndex); /// Start the session. /// - /// The session start to be used for validation + /// The session start to be used for validation. 
fn start_session(start_index: SessionIndex); } @@ -242,7 +249,7 @@ pub trait SessionHandler { /// A notification for end of the session. /// - /// Note it is triggered before any `SessionManager::end_session` handlers, + /// Note it is triggered before any [`SessionManager::end_session`] handlers, /// so we can still affect the validator set. fn on_before_session_ending() {} @@ -250,45 +257,9 @@ pub trait SessionHandler { fn on_disabled(validator_index: usize); } -/// A session handler for specific key type. -pub trait OneSessionHandler: BoundToRuntimeAppPublic { - /// The key type expected. - type Key: Decode + Default + RuntimeAppPublic; - - fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator, ValidatorId: 'a; - - /// Session set has changed; act appropriately. Note that this can be called - /// before initialization of your module. - /// - /// `changed` is true when at least one of the session keys - /// or the underlying economic identities/distribution behind one the - /// session keys has changed, false otherwise. - /// - /// The `validators` are the validators of the incoming session, and `queued_validators` - /// will follow. - fn on_new_session<'a, I: 'a>( - changed: bool, - validators: I, - queued_validators: I, - ) where I: Iterator, ValidatorId: 'a; - - - /// A notification for end of the session. - /// - /// Note it is triggered before any `SessionManager::end_session` handlers, - /// so we can still affect the validator set. - fn on_before_session_ending() {} - - /// A validator got disabled. Act accordingly until a new session begins. - fn on_disabled(_validator_index: usize); -} - #[impl_trait_for_tuples::impl_for_tuples(1, 30)] -#[tuple_types_no_default_trait_bound] +#[tuple_types_custom_trait_bound(OneSessionHandler)] impl SessionHandler for Tuple { - for_tuples!( where #( Tuple: OneSessionHandler )* ); - for_tuples!( const KEY_TYPE_IDS: &'static [KeyTypeId] = &[ #( ::ID ),* ]; ); @@ -346,15 +317,15 @@ impl SessionHandler for TestSessionHandler { fn on_disabled(_: usize) {} } -impl ValidatorRegistration for Module { +impl ValidatorRegistration for Module { fn is_registered(id: &T::ValidatorId) -> bool { Self::load_keys(id).is_some() } } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From + Into<::Event>; + type Event: From + Into<::Event>; /// A stable ID for a validator. type ValidatorId: Member + Parameter; @@ -392,7 +363,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as Session { + trait Store for Module as Session { /// The current set of validators. Validators get(fn validators): Vec; @@ -438,7 +409,7 @@ decl_storage! { for (account, val, keys) in config.keys.iter().cloned() { >::inner_set_keys(&val, keys) .expect("genesis config must not contain duplicates; qed"); - frame_system::Module::::inc_ref(&account); + assert!(frame_system::Module::::inc_consumers(&account).is_ok()); } let initial_validators_0 = T::SessionManager::new_session(0) @@ -483,7 +454,7 @@ decl_event!( decl_error! { /// Error for the session module. - pub enum Error for Module { + pub enum Error for Module { /// Invalid ownership proof. InvalidProof, /// No associated validator ID for account. @@ -492,11 +463,13 @@ decl_error! { DuplicatedKey, /// No keys are associated with this account. NoKeys, + /// Key setting account is not live, so it's impossible to associate keys. + NoAccount, } } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -549,7 +522,7 @@ decl_module! { fn on_initialize(n: T::BlockNumber) -> Weight { if T::ShouldEndSession::should_end_session(n) { Self::rotate_session(); - T::MaximumBlockWeight::get() + T::BlockWeights::get().max_block } else { // NOTE: the non-database part of the weight for `should_end_session(n)` is // included as weight for empty block, the database part is expected to be in @@ -560,7 +533,7 @@ decl_module! { } } -impl Module { +impl Module { /// Move on to next session. Register new validator set and session keys. Changes /// to the validator set have a session of delay to take effect. This allows for /// equivocation punishment after a fork. @@ -683,6 +656,55 @@ impl Module { Self::validators().iter().position(|i| i == c).map(Self::disable_index).ok_or(()) } + /// Upgrade the key type from some old type to a new type. Supports adding + /// and removing key types. + /// + /// This function should be used with extreme care and only during an + /// `on_runtime_upgrade` block. Misuse of this function can put your blockchain + /// into an unrecoverable state. + /// + /// Care should be taken that the raw versions of the + /// added keys are unique for every `ValidatorId, KeyTypeId` combination. + /// This is an invariant that the session module typically maintains internally. + /// + /// As the actual values of the keys are typically not known at runtime upgrade, + /// it's recommended to initialize the keys to a (unique) dummy value with the expectation + /// that all validators should invoke `set_keys` before those keys are actually + /// required. + pub fn upgrade_keys(upgrade: F) where + Old: OpaqueKeys + Member + Decode, + F: Fn(T::ValidatorId, Old) -> T::Keys, + { + let old_ids = Old::key_ids(); + let new_ids = T::Keys::key_ids(); + + // Translate NextKeys, and key ownership relations at the same time. + >::translate::(|val, old_keys| { + // Clear all key ownership relations. Typically the overlap should + // stay the same, but no guarantees by the upgrade function. + for i in old_ids.iter() { + Self::clear_key_owner(*i, old_keys.get_raw(*i)); + } + + let new_keys = upgrade(val.clone(), old_keys); + + // And now set the new ones. + for i in new_ids.iter() { + Self::put_key_owner(*i, new_keys.get_raw(*i), &val); + } + + Some(new_keys) + }); + + let _ = >::translate::, _>( + |k| { + k.map(|k| k.into_iter() + .map(|(val, old_keys)| (val.clone(), upgrade(val, old_keys))) + .collect::>()) + } + ); + } + /// Perform the set_key operation, checking for duplicates. Does not set `Changed`. /// /// This ensures that the reference counter in system is incremented appropriately and as such @@ -691,9 +713,11 @@ impl Module { let who = T::ValidatorIdOf::convert(account.clone()) .ok_or(Error::::NoAssociatedValidatorId)?; + frame_system::Module::::inc_consumers(&account).map_err(|_| Error::::NoAccount)?; let old_keys = Self::inner_set_keys(&who, keys)?; - if old_keys.is_none() { - frame_system::Module::::inc_ref(&account); + if old_keys.is_some() { + let _ = frame_system::Module::::dec_consumers(&account); + // ^^^ Defensive only; Consumers were incremented just before, so should never fail. 
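// NOTE (illustrative, not part of the diff): `inc_ref`/`dec_ref` on
// frame_system have been replaced by a provider/consumer reference-counting
// API. A module that needs an account to stay alive now does, in sketch form:
//
//     // Fallible: the account must already have a provider reference
//     // (e.g. from an existential deposit), otherwise this returns an error.
//     frame_system::Module::<T>::inc_consumers(&account)
//         .map_err(|_| Error::<T>::NoAccount)?;
//
//     // ...and releases the dependency again when it no longer needs it:
//     frame_system::Module::<T>::dec_consumers(&account);
//
// This is why `set_keys` gains the new `NoAccount` error above and why the
// test mocks call `inc_providers` for every key owner at genesis.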
} Ok(()) @@ -741,7 +765,7 @@ impl Module { let key_data = old_keys.get_raw(*id); Self::clear_key_owner(*id, key_data); } - frame_system::Module::::dec_ref(&account); + frame_system::Module::::dec_consumers(&account); Ok(()) } @@ -771,12 +795,25 @@ impl Module { } } +impl ValidatorSet for Module { + type ValidatorId = T::ValidatorId; + type ValidatorIdOf = T::ValidatorIdOf; + + fn session_index() -> sp_staking::SessionIndex { + Module::::current_index() + } + + fn validators() -> Vec { + Module::::validators() + } +} + /// Wraps the author-scraping logic for consensus engines that can recover /// the canonical index of an author. This then transforms it into the /// registering account-ID of that session key index. pub struct FindAccountFromAuthorIndex(sp_std::marker::PhantomData<(T, Inner)>); -impl> FindAuthor +impl> FindAuthor for FindAccountFromAuthorIndex { fn find_author<'a, I>(digests: I) -> Option @@ -789,7 +826,7 @@ impl> FindAuthor } } -impl EstimateNextNewSession for Module { +impl EstimateNextNewSession for Module { /// This session module always calls new_session and next_session at the same time, hence we /// do a simple proxy and pass the function to next rotation. fn estimate_next_new_session(now: T::BlockNumber) -> Option { diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index 1d787ac53b438..73499bf739b87 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ use super::*; use std::cell::RefCell; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; +use frame_support::{parameter_types, BasicExternalities}; use sp_core::{crypto::key_types::DUMMY, H256}; use sp_runtime::{ Perbill, impl_opaque_keys, @@ -27,6 +27,9 @@ use sp_runtime::{ testing::{Header, UintAuthorityId}, }; use sp_staking::SessionIndex; +use crate as pallet_session; +#[cfg(feature = "historical")] +use crate::historical as pallet_session_historical; impl_opaque_keys! { pub struct MockSessionKeys { @@ -40,10 +43,59 @@ impl From for MockSessionKeys { } } -impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} +pub const KEY_ID_A: KeyTypeId = KeyTypeId([4; 4]); +pub const KEY_ID_B: KeyTypeId = KeyTypeId([9; 4]); + +#[derive(Debug, Clone, codec::Encode, codec::Decode, PartialEq, Eq)] +pub struct PreUpgradeMockSessionKeys { + pub a: [u8; 32], + pub b: [u8; 64], +} + +impl OpaqueKeys for PreUpgradeMockSessionKeys { + type KeyTypeIdProviders = (); + + fn key_ids() -> &'static [KeyTypeId] { + &[KEY_ID_A, KEY_ID_B] + } + + fn get_raw(&self, i: KeyTypeId) -> &[u8] { + match i { + i if i == KEY_ID_A => &self.a[..], + i if i == KEY_ID_B => &self.b[..], + _ => &[], + } + } } +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +#[cfg(feature = "historical")] +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, + Historical: pallet_session_historical::{Module}, + } +); + +#[cfg(not(feature = "historical"))] +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, + } +); + thread_local! { pub static VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); pub static NEXT_VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); @@ -153,54 +205,54 @@ pub fn reset_before_session_end_called() { pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { - keys: NEXT_VALIDATORS.with(|l| - l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - ), - }.assimilate_storage(&mut t).unwrap(); + let keys: Vec<_> = NEXT_VALIDATORS.with(|l| + l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() + ); + BasicExternalities::execute_with_storage(&mut t, || { + for (ref k, ..) in &keys { + frame_system::Module::::inc_providers(k); + } + frame_system::Module::::inc_providers(&4); + // An additional identity that we use. + frame_system::Module::::inc_providers(&69); + }); + pallet_session::GenesisConfig:: { keys }.assimilate_storage(&mut t).unwrap(); sp_io::TestExternalities::new(t) } -#[derive(Clone, Eq, PartialEq)] -pub struct Test; - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; pub const MinimumPeriod: u64 = 5; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -211,7 +263,7 @@ parameter_types! { pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); } -impl Trait for Test { +impl Config for Test { type ShouldEndSession = TestShouldEndSession; #[cfg(feature = "historical")] type SessionManager = crate::historical::NoteHistoricalRoot; @@ -221,17 +273,14 @@ impl Trait for Test { type ValidatorId = u64; type ValidatorIdOf = ConvertInto; type Keys = MockSessionKeys; - type Event = (); + type Event = Event; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type NextSessionRotation = (); type WeightInfo = (); } #[cfg(feature = "historical")] -impl crate::historical::Trait for Test { +impl crate::historical::Config for Test { type FullIdentification = u64; type FullIdentificationOf = sp_runtime::traits::ConvertInto; } - -pub type System = frame_system::Module; -pub type Session = Module; diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs index 75def78046beb..c876770c74bcc 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,6 +18,7 @@ // Tests for the Session Pallet use super::*; +use codec::Decode; use frame_support::{traits::OnInitialize, assert_ok}; use sp_core::crypto::key_types::DUMMY; use sp_runtime::testing::UintAuthorityId; @@ -25,6 +26,7 @@ use mock::{ SESSION_CHANGED, TEST_SESSION_CHANGED, authorities, force_new_session, set_next_validators, set_session_length, session_changed, Origin, System, Session, reset_before_session_end_called, before_session_end_called, new_test_ext, + PreUpgradeMockSessionKeys, }; fn initialize_block(block: u64) { @@ -59,9 +61,9 @@ fn keys_cleared_on_kill() { let id = DUMMY; assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1)); - assert!(!System::allow_death(&1)); + assert!(System::is_provider_required(&1)); assert_ok!(Session::purge_keys(Origin::signed(1))); - assert!(System::allow_death(&1)); + assert!(!System::is_provider_required(&1)); assert_eq!(Session::load_keys(&1), None); assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), None); @@ -251,31 +253,32 @@ fn session_changed_flag_works() { #[test] fn periodic_session_works() { - struct Period; - struct Offset; - impl Get for Period { - fn get() -> u64 { 10 } + frame_support::parameter_types! { + const Period: u64 = 10; + const Offset: u64 = 3; } - impl Get for Offset { - fn get() -> u64 { 3 } - } - - type P = PeriodicSessions; - for i in 0..3 { + for i in 0u64..3 { assert!(!P::should_end_session(i)); + assert_eq!(P::estimate_next_session_rotation(i).unwrap(), 3); } - assert!(P::should_end_session(3)); + assert!(P::should_end_session(3u64)); + assert_eq!(P::estimate_next_session_rotation(3u64).unwrap(), 3); - for i in (1..10).map(|i| 3 + i) { + for i in (1u64..10).map(|i| 3 + i) { assert!(!P::should_end_session(i)); + assert_eq!(P::estimate_next_session_rotation(i).unwrap(), 13); } - assert!(P::should_end_session(13)); + assert!(P::should_end_session(13u64)); + assert_eq!(P::estimate_next_session_rotation(13u64).unwrap(), 13); + + assert!(!P::should_end_session(14u64)); + assert_eq!(P::estimate_next_session_rotation(14u64).unwrap(), 23); } #[test] @@ -285,7 +288,7 @@ fn session_keys_generate_output_works_as_set_keys_input() { assert_ok!( Session::set_keys( Origin::signed(2), - ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), + ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), vec![], ) ); @@ -308,3 +311,97 @@ fn return_true_if_more_than_third_is_disabled() { assert_eq!(Session::disable_index(3), true); }); } + +#[test] +fn upgrade_keys() { + use frame_support::storage; + use mock::Test; + use sp_core::crypto::key_types::DUMMY; + + // This test assumes certain mocks. + assert_eq!(mock::NEXT_VALIDATORS.with(|l| l.borrow().clone()), vec![1, 2, 3]); + assert_eq!(mock::VALIDATORS.with(|l| l.borrow().clone()), vec![1, 2, 3]); + + new_test_ext().execute_with(|| { + let pre_one = PreUpgradeMockSessionKeys { + a: [1u8; 32], + b: [1u8; 64], + }; + + let pre_two = PreUpgradeMockSessionKeys { + a: [2u8; 32], + b: [2u8; 64], + }; + + let pre_three = PreUpgradeMockSessionKeys { + a: [3u8; 32], + b: [3u8; 64], + }; + + let val_keys = vec![ + (1u64, pre_one), + (2u64, pre_two), + (3u64, pre_three), + ]; + + // Set `QueuedKeys`. + { + let storage_key = >::hashed_key(); + assert!(storage::unhashed::exists(&storage_key)); + storage::unhashed::put(&storage_key, &val_keys); + } + + // Set `NextKeys`. 
+ { + for &(i, ref keys) in val_keys.iter() { + let storage_key = >::hashed_key_for(i); + assert!(storage::unhashed::exists(&storage_key)); + storage::unhashed::put(&storage_key, keys); + } + } + + // Set `KeyOwner`. + { + for &(i, ref keys) in val_keys.iter() { + // clear key owner for `UintAuthorityId` keys set in genesis. + let presumed = UintAuthorityId(i); + let raw_prev = presumed.as_ref(); + + assert_eq!(Session::key_owner(DUMMY, raw_prev), Some(i)); + Session::clear_key_owner(DUMMY, raw_prev); + + Session::put_key_owner(mock::KEY_ID_A, keys.get_raw(mock::KEY_ID_A), &i); + Session::put_key_owner(mock::KEY_ID_B, keys.get_raw(mock::KEY_ID_B), &i); + } + } + + // Do the upgrade and check sanity. + let mock_keys_for = |val| mock::MockSessionKeys { dummy: UintAuthorityId(val) }; + Session::upgrade_keys::( + |val, _old_keys| mock_keys_for(val), + ); + + // Check key ownership. + for (i, ref keys) in val_keys.iter() { + assert!(Session::key_owner(mock::KEY_ID_A, keys.get_raw(mock::KEY_ID_A)).is_none()); + assert!(Session::key_owner(mock::KEY_ID_B, keys.get_raw(mock::KEY_ID_B)).is_none()); + + let migrated_key = UintAuthorityId(*i); + assert_eq!(Session::key_owner(DUMMY, migrated_key.as_ref()), Some(*i)); + } + + // Check queued keys. + assert_eq!( + Session::queued_keys(), + vec![ + (1, mock_keys_for(1)), + (2, mock_keys_for(2)), + (3, mock_keys_for(3)), + ], + ); + + for i in 1u64..4 { + assert_eq!(>::get(&i), Some(mock_keys_for(i))); + } + }) +} diff --git a/frame/session/src/weights.rs b/frame/session/src/weights.rs index f1fc18b0ef998..05d9f7d787315 100644 --- a/frame/session/src/weights.rs +++ b/frame/session/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -50,7 +50,7 @@ pub trait WeightInfo { /// Weights for pallet_session using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn set_keys() -> Weight { (86_033_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index 2f3f3adabc2c2..5ddebeb9f5791 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-society" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } rand_chacha = { version = "0.2", default-features = false } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-io ={ version = "3.0.0", path = "../../primitives/io" } +pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/society/README.md b/frame/society/README.md index b4e1fbaf22cba..a25940f636de9 100644 --- a/frame/society/README.md +++ b/frame/society/README.md @@ -24,7 +24,7 @@ Of the non-suspended members, there is always a: Of the non-suspended members of the society, a random set of them are chosen as "skeptics". The mechanics of skeptics is explained in the -[member phase](#member-phase) below. +[member phase](https://docs.rs/pallet-society/latest/pallet_society/#member-phase) below. ### Mechanics @@ -225,4 +225,4 @@ make judgement on a suspended candidate. * `set_max_membership` - The ROOT origin can update the maximum member count for the society. The max membership count must be greater than 1. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index cbfe5a00de240..f8f8fa61a00f6 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ //! # Society Module //! -//! - [`society::Trait`](./trait.Trait.html) +//! - [`society::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! 
## Overview @@ -268,13 +268,13 @@ use frame_support::traits::{ }; use frame_system::{self as system, ensure_signed, ensure_root}; -type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; /// The module's configuration trait. -pub trait Trait: system::Trait { +pub trait Config: system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The societies's module id type ModuleId: Get; @@ -403,7 +403,7 @@ impl BidKind { // This module's storage items. decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Society { + trait Store for Module, I: Instance=DefaultInstance> as Society { /// The first member. pub Founder get(fn founder) build(|config: &GenesisConfig| config.members.first().cloned()): Option; @@ -472,7 +472,7 @@ decl_storage! { // The module's dispatchable functions. decl_module! { /// The module declaration. - pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { type Error = Error; /// The minimum amount of a deposit required for a bid to be made. const CandidateDeposit: BalanceOf = T::CandidateDeposit::get(); @@ -533,7 +533,7 @@ decl_module! { /// /// Total Complexity: O(M + B + C + logM + logB + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn bid(origin, value: BalanceOf) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!>::contains_key(&who), Error::::Suspended); @@ -572,7 +572,7 @@ decl_module! { /// /// Total Complexity: O(B + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn unbid(origin, pos: u32) -> DispatchResult { let who = ensure_signed(origin)?; @@ -642,7 +642,7 @@ decl_module! { /// /// Total Complexity: O(M + B + C + logM + logB + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn vouch(origin, who: T::AccountId, value: BalanceOf, tip: BalanceOf) -> DispatchResult { let voucher = ensure_signed(origin)?; // Check user is not suspended. @@ -683,7 +683,7 @@ decl_module! { /// /// Total Complexity: O(B) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn unvouch(origin, pos: u32) -> DispatchResult { let voucher = ensure_signed(origin)?; ensure!(Self::vouching(&voucher) == Some(VouchingStatus::Vouching), Error::::NotVouching); @@ -721,7 +721,7 @@ decl_module! { /// /// Total Complexity: O(M + logM + C) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn vote(origin, candidate: ::Source, approve: bool) { let voter = ensure_signed(origin)?; let candidate = T::Lookup::lookup(candidate)?; @@ -752,7 +752,7 @@ decl_module! { /// /// Total Complexity: O(M + logM) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn defender_vote(origin, approve: bool) { let voter = ensure_signed(origin)?; let members = >::get(); @@ -784,7 +784,7 @@ decl_module! 
{ /// /// Total Complexity: O(M + logM + P + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn payout(origin) { let who = ensure_signed(origin)?; @@ -826,7 +826,7 @@ decl_module! { /// /// Total Complexity: O(1) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn found(origin, founder: T::AccountId, max_members: u32, rules: Vec) { T::FounderSetOrigin::ensure_origin(origin)?; ensure!(!>::exists(), Error::::AlreadyFounded); @@ -853,7 +853,7 @@ decl_module! { /// /// Total Complexity: O(1) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn unfound(origin) { let founder = ensure_signed(origin)?; ensure!(Founder::::get() == Some(founder.clone()), Error::::NotFounder); @@ -895,7 +895,7 @@ decl_module! { /// /// Total Complexity: O(M + logM + B) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn judge_suspended_member(origin, who: T::AccountId, forgive: bool) { T::SuspensionJudgementOrigin::ensure_origin(origin)?; ensure!(>::contains_key(&who), Error::::NotSuspended); @@ -966,7 +966,7 @@ decl_module! { /// /// Total Complexity: O(M + logM + B + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn judge_suspended_candidate(origin, who: T::AccountId, judgement: Judgement) { T::SuspensionJudgementOrigin::ensure_origin(origin)?; if let Some((value, kind)) = >::get(&who) { @@ -1026,7 +1026,7 @@ decl_module! { /// /// Total Complexity: O(1) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn set_max_members(origin, max: u32) { ensure_root(origin)?; ensure!(max > 1, Error::::MaxMembers); @@ -1038,13 +1038,14 @@ decl_module! { let mut members = vec![]; let mut weight = 0; + let weights = T::BlockWeights::get(); // Run a candidate/membership rotation if (n % T::RotationPeriod::get()).is_zero() { members = >::get(); Self::rotate_period(&mut members); - weight += T::MaximumBlockWeight::get() / 20; + weight += weights.max_block / 20; } // Run a challenge rotation @@ -1055,7 +1056,7 @@ decl_module! { } Self::rotate_challenge(&mut members); - weight += T::MaximumBlockWeight::get() / 20; + weight += weights.max_block / 20; } weight @@ -1065,7 +1066,7 @@ decl_module! { decl_error! { /// Errors for this module. - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// An incorrect position was provided. BadPosition, /// User is not a member. @@ -1108,7 +1109,7 @@ decl_error! { decl_event! { /// Events for this module. pub enum Event where - AccountId = ::AccountId, + AccountId = ::AccountId, Balance = BalanceOf { /// The society is founded by the given identity. \[founder\] @@ -1151,7 +1152,7 @@ decl_event! { /// Simple ensure origin struct to filter for the founder account. pub struct EnsureFounder(sp_std::marker::PhantomData); -impl EnsureOrigin for EnsureFounder { +impl EnsureOrigin for EnsureFounder { type Success = T::AccountId; fn try_origin(o: T::Origin) -> Result { o.into().and_then(|o| match (o, Founder::::get()) { @@ -1182,7 +1183,7 @@ fn pick_usize<'a, R: RngCore>(rng: &mut R, max: usize) -> usize { (rng.next_u32() % (max as u32 + 1)) as usize } -impl, I: Instance> Module { +impl, I: Instance> Module { /// Puts a bid into storage ordered by smallest to largest value. 
/// Allows a maximum of 1000 bids in queue, removing largest value people first. fn put_bid( @@ -1669,7 +1670,7 @@ impl, I: Instance> Module { } } -impl OnUnbalanced> for Module { +impl OnUnbalanced> for Module { fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { let numeric_amount = amount.peek(); diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 212bcfd404ff1..4b1bb21dd18db 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,25 +18,34 @@ //! Test utilities use super::*; +use crate as pallet_society; use frame_support::{ - impl_outer_origin, parameter_types, ord_parameter_types, + parameter_types, ord_parameter_types, traits::{OnInitialize, OnFinalize, TestRandomness}, }; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; use frame_system::EnsureSignedBy; -impl_outer_origin! { - pub enum Origin for Test {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Society: pallet_society::{Module, Call, Storage, Event, Config}, + } +); -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! { pub const CandidateDeposit: u64 = 25; pub const WrongSideDeduction: u64 = 2; @@ -45,14 +54,11 @@ parameter_types! { pub const PeriodSpend: u64 = 1000; pub const MaxLockDuration: u64 = 100; pub const ChallengePeriod: u64 = 8; - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: u32 = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const ExistentialDeposit: u64 = 1; pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } ord_parameter_types! { @@ -60,46 +66,43 @@ ord_parameter_types! 
{ pub const SuspensionJudgementSetAccount: u128 = 2; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u128; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type OnNewAccount = (); type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; type SystemWeightInfo = (); + type SS58Prefix = (); } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; - type Event = (); + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } -impl Trait for Test { - type Event = (); +impl Config for Test { + type Event = Event; type Currency = pallet_balances::Module; type Randomness = TestRandomness; type CandidateDeposit = CandidateDeposit; @@ -115,10 +118,6 @@ impl Trait for Test { type ModuleId = SocietyModuleId; } -pub type Society = Module; -pub type System = frame_system::Module; -pub type Balances = pallet_balances::Module; - pub struct EnvBuilder { members: Vec, balance: u64, @@ -154,7 +153,7 @@ impl EnvBuilder { pallet_balances::GenesisConfig:: { balances: self.balances, }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::{ + pallet_society::GenesisConfig::{ members: self.members, pot: self.pot, max_members: self.max_members, diff --git a/frame/society/src/tests.rs b/frame/society/src/tests.rs index 0374c7bcd7a60..7c83448395770 100644 --- a/frame/society/src/tests.rs +++ b/frame/society/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 88b8c1270a4e1..c5f7dba075455 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-staking" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,33 +15,33 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] static_assertions = "1.1.0" serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-npos-elections = { version = "2.0.0", default-features = false, path = "../../primitives/npos-elections" } -sp-io ={ version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-session = { version = "2.0.0", default-features = false, features = ["historical"], path = "../session" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-npos-elections = { version = "3.0.0", default-features = false, path = "../../primitives/npos-elections" } +sp-io ={ version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +pallet-session = { version = "3.0.0", default-features = false, features = ["historical"], path = "../session" } +pallet-authorship = { version = "3.0.0", default-features = false, path = "../authorship" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } # Optional imports for benchmarking -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } rand_chacha = { version = "0.2", default-features = false, optional = true } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-timestamp = { version = "2.0.0", path = "../timestamp" } -pallet-staking-reward-curve = { version = "2.0.0", path = "../staking/reward-curve" } 
-substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } -frame-benchmarking = { version = "2.0.0", path = "../benchmarking" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-storage = { version = "3.0.0", path = "../../primitives/storage" } +sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } +pallet-balances = { version = "3.0.0", path = "../balances" } +pallet-timestamp = { version = "3.0.0", path = "../timestamp" } +pallet-staking-reward-curve = { version = "3.0.0", path = "../staking/reward-curve" } +substrate-test-utils = { version = "3.0.0", path = "../../test-utils" } +frame-benchmarking = { version = "3.0.0", path = "../benchmarking" } rand_chacha = { version = "0.2" } -parking_lot = "0.10.2" +parking_lot = "0.11.1" hex = "0.4" [features] diff --git a/frame/staking/README.md b/frame/staking/README.md index b7b2141e58a5b..a379d0a7ad5e2 100644 --- a/frame/staking/README.md +++ b/frame/staking/README.md @@ -57,7 +57,7 @@ There are three possible roles that any staked account pair can be in: `Validato and `Idle` (defined in [`StakerStatus`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.StakerStatus.html)). There are three corresponding instructions to change between roles, namely: [`validate`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.validate), -[`nominate`](./enum.Call.html#variant.nominate), and [`chill`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.chill). +[`nominate`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.nominate), and [`chill`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.chill). #### Validating @@ -81,7 +81,7 @@ between the validator and its nominators. This rule incentivizes the nominators the misbehaving/offline validators as much as possible, simply because the nominators will also lose funds if they vote poorly. -An account can become a nominator via the [`nominate`](enum.Call.html#variant.nominate) call. +An account can become a nominator via the [`nominate`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.nominate) call. #### Rewards and Slash @@ -90,7 +90,7 @@ valid behavior_ while _punishing any misbehavior or lack of availability_. Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the `payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to the -validator as well as its nominators. Only the [`Trait::MaxNominatorRewardedPerValidator`] +validator as well as its nominators. Only the [`Config::MaxNominatorRewardedPerValidator`] biggest stakers can claim their reward. This is to limit the i/o cost to mutate storage for each nominator's account. @@ -102,7 +102,7 @@ Slashing logic is further described in the documentation of the `slashing` modul Similar to slashing, rewards are also shared among a validator and its associated nominators. Yet, the reward funds are not always transferred to the stash account and can be configured. See -[Reward Calculation](#reward-calculation) for more details. +[Reward Calculation](https://docs.rs/pallet-staking/latest/pallet_staking/#reward-calculation) for more details. 
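(Editorial aside.) To make the proportional reward split described in the README hunk above concrete, here is a minimal, self-contained Rust sketch. The `Exposure`-like struct, the `split_reward` helper, and the plain-percentage commission handling are illustrative stand-ins only, not the pallet's actual payout code (which uses `Perbill` fixed-point arithmetic rather than integer percentages).

```rust
// Illustrative only: a simplified stand-in for pallet_staking's `Exposure`,
// showing how an era reward could be split proportionally to stake.
// Commission is modelled as a plain percentage here for clarity.

struct Exposure {
    total: u128,              // total stake backing the validator
    own: u128,                // validator's own stake
    others: Vec<(u64, u128)>, // (nominator account id, stake)
}

/// Split `era_reward` between the validator and its nominators.
/// `commission_percent` is the validator's cut taken off the top.
fn split_reward(
    era_reward: u128,
    commission_percent: u128,
    exp: &Exposure,
) -> (u128, Vec<(u64, u128)>) {
    let commission = era_reward * commission_percent / 100;
    let leftover = era_reward - commission;

    // Validator earns commission plus a share proportional to `own / total`.
    let validator_part = commission + leftover * exp.own / exp.total;

    // Each nominator earns a share proportional to `stake / total`.
    let nominator_parts = exp
        .others
        .iter()
        .map(|(who, stake)| (*who, leftover * stake / exp.total))
        .collect();

    (validator_part, nominator_parts)
}

fn main() {
    let exp = Exposure { total: 1_000, own: 400, others: vec![(1, 350), (2, 250)] };
    let (v, ns) = split_reward(10_000, 10, &exp);
    println!("validator: {v}, nominators: {ns:?}"); // validator: 4600, nominators: [(1, 3150), (2, 2250)]
}
```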
#### Chilling @@ -110,7 +110,7 @@ Finally, any of the roles above can choose to step back temporarily and just chi This means that if they are a nominator, they will not be considered as voters anymore and if they are validators, they will no longer be a candidate for the next election. -An account can step back via the [`chill`](enum.Call.html#variant.chill) call. +An account can step back via the [`chill`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.chill) call. ### Session managing @@ -137,10 +137,10 @@ use frame_support::{decl_module, dispatch}; use frame_system::ensure_signed; use pallet_staking::{self as staking}; -pub trait Trait: staking::Trait {} +pub trait Config: staking::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// Reward a validator. #[weight = 0] pub fn reward_myself(origin) -> dispatch::DispatchResult { @@ -176,14 +176,14 @@ Validators and nominators are rewarded at the end of each era. The total reward calculated using the era duration and the staking rate (the total amount of tokens staked by nominators and validators, divided by the total token supply). It aims to incentivize toward a defined staking rate. The full specification can be found -[here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model). +[here](https://research.web3.foundation/en/latest/polkadot/economics/1-token-economics.html#inflation-model). Total reward is split among validators and their nominators depending on the number of points they received during the era. Points are added to a validator using [`reward_by_ids`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.reward_by_ids) or [`reward_by_indices`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.reward_by_indices). -[`Module`](./struct.Module.html) implements +[`Module`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Module.html) implements [`pallet_authorship::EventHandler`](https://docs.rs/pallet-authorship/latest/pallet_authorship/trait.EventHandler.html) to add reward points to block producer and block producer of referenced uncles. @@ -198,11 +198,11 @@ validator and all of the nominators that nominated the validator, proportional t staked behind this validator (_i.e._ dividing the [`own`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html#structfield.own) or [`others`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html#structfield.others) by -[`total`](./struct.Exposure.html#structfield.total) in [`Exposure`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html)). +[`total`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html#structfield.total) in [`Exposure`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html)). All entities who receive a reward have the option to choose their reward destination through the [`Payee`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Payee.html) storage item (see -[`set_payee`](enum.Call.html#variant.set_payee)), to be one of the following: +[`set_payee`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.set_payee)), to be one of the following: - Controller account, (obviously) not increasing the staked value. - Stash account, not increasing the staked value. 
@@ -213,14 +213,14 @@ All entities who receive a reward have the option to choose their reward destina Any funds already placed into stash can be the target of the following operations: The controller account can free a portion (or all) of the funds using the -[`unbond`](enum.Call.html#variant.unbond) call. Note that the funds are not immediately -accessible. Instead, a duration denoted by [`BondingDuration`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.BondingDuration.html) +[`unbond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.unbond) call. Note that the funds are not immediately +accessible. Instead, a duration denoted by [`BondingDuration`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Trait.html#associatedtype.BondingDuration) (in number of eras) must pass until the funds can actually be removed. Once the `BondingDuration` is over, the [`withdraw_unbonded`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.withdraw_unbonded) call can be used to actually withdraw the funds. Note that there is a limitation to the number of fund-chunks that can be scheduled to be -unlocked in the future via [`unbond`](enum.Call.html#variant.unbond). In case this maximum +unlocked in the future via [`unbond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.unbond). In case this maximum (`MAX_UNLOCKING_CHUNKS`) is reached, the bonded account _must_ first wait until a successful call to `withdraw_unbonded` to remove some of the chunks. @@ -246,4 +246,4 @@ The Staking module depends on the [`GenesisConfig`](https://docs.rs/pallet-staki - [Session](https://docs.rs/pallet-session/latest/pallet_session/): Used to manage sessions. Also, a list of new validators is stored in the Session module's `Validators` at the end of each era. 
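(Editorial aside.) A toy model of the unbonding flow described earlier in this README hunk: `unbond` schedules chunks that mature after the bonding duration, and once `MAX_UNLOCKING_CHUNKS` is reached a `withdraw_unbonded` is needed before unbonding more. The `Ledger`/`UnlockChunk` types and the `BONDING_DURATION` value below are assumptions for illustration only; the real logic lives in the pallet's staking ledger.

```rust
// Toy model of the unbonding flow described in the README. Constants and
// types are illustrative; this is not pallet_staking's implementation.
const MAX_UNLOCKING_CHUNKS: usize = 32;
const BONDING_DURATION: u32 = 28; // eras; the value is an assumption for this example

#[derive(Debug)]
struct UnlockChunk { value: u128, era: u32 } // withdrawable once `current_era >= era`

#[derive(Debug)]
struct Ledger { active: u128, unlocking: Vec<UnlockChunk> }

impl Ledger {
    /// Schedule `value` to unlock after the bonding duration.
    fn unbond(&mut self, value: u128, current_era: u32) -> Result<(), &'static str> {
        if self.unlocking.len() >= MAX_UNLOCKING_CHUNKS {
            return Err("too many unlocking chunks; call withdraw_unbonded first");
        }
        let value = value.min(self.active);
        self.active -= value;
        self.unlocking.push(UnlockChunk { value, era: current_era + BONDING_DURATION });
        Ok(())
    }

    /// Release every chunk whose era has passed and return the freed amount.
    fn withdraw_unbonded(&mut self, current_era: u32) -> u128 {
        let mut freed = 0;
        self.unlocking.retain(|c| {
            if c.era <= current_era { freed += c.value; false } else { true }
        });
        freed
    }
}

fn main() {
    let mut ledger = Ledger { active: 1_000, unlocking: vec![] };
    ledger.unbond(100, 10).unwrap();             // matures at era 38
    assert_eq!(ledger.withdraw_unbonded(20), 0); // still locked
    assert_eq!(ledger.withdraw_unbonded(38), 100);
}
```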
-License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/staking/fuzzer/Cargo.toml b/frame/staking/fuzzer/Cargo.toml index e1431aa54d4a7..a88e9619174c6 100644 --- a/frame/staking/fuzzer/Cargo.toml +++ b/frame/staking/fuzzer/Cargo.toml @@ -14,20 +14,26 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] honggfuzz = "0.5" -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -pallet-staking = { version = "2.0.0", path = "..", features = ["runtime-benchmarks"] } -pallet-staking-reward-curve = { version = "2.0.0", path = "../reward-curve" } -pallet-session = { version = "2.0.0", path = "../../session" } -pallet-indices = { version = "2.0.0", path = "../../indices" } -pallet-balances = { version = "2.0.0", path = "../../balances" } -pallet-timestamp = { version = "2.0.0", path = "../../timestamp" } -frame-system = { version = "2.0.0", path = "../../system" } -frame-support = { version = "2.0.0", path = "../../support" } -sp-std = { version = "2.0.0", path = "../../../primitives/std" } -sp-io ={ version = "2.0.0", path = "../../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-npos-elections = { version = "2.0.0", path = "../../../primitives/npos-elections" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +pallet-staking = { version = "3.0.0", path = "..", features = ["runtime-benchmarks"] } +pallet-staking-reward-curve = { version = "3.0.0", path = "../reward-curve" } +pallet-session = { version = "3.0.0", path = "../../session" } +pallet-indices = { version = "3.0.0", path = "../../indices" } +pallet-balances = { version = "3.0.0", path = "../../balances" } +pallet-timestamp = { version = "3.0.0", path = "../../timestamp" } +frame-system = { version = "3.0.0", path = "../../system" } +frame-support = { version = "3.0.0", path = "../../support" } +sp-std = { version = "3.0.0", path = "../../../primitives/std" } +sp-io ={ version = "3.0.0", path = "../../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-npos-elections = { version = "3.0.0", path = "../../../primitives/npos-elections" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +serde = "1.0.101" + +[features] +# Note feature std is required so that impl_opaque_keys derive serde. +default = ["std"] +std = [] [[bin]] name = "submit_solution" diff --git a/frame/staking/fuzzer/src/mock.rs b/frame/staking/fuzzer/src/mock.rs index 96df7674e9f44..88b001c7e69e1 100644 --- a/frame/staking/fuzzer/src/mock.rs +++ b/frame/staking/fuzzer/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,39 +17,36 @@ //! Mock file for staking fuzzing. -use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types}; +use frame_support::parameter_types; type AccountId = u64; type AccountIndex = u32; type BlockNumber = u64; type Balance = u64; -pub type System = frame_system::Module; -pub type Balances = pallet_balances::Module; -pub type Staking = pallet_staking::Module; -pub type Indices = pallet_indices::Module; -pub type Session = pallet_session::Module; - -impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} -} - -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - staking::Staking, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, + Indices: pallet_indices::{Module, Call, Storage, Config, Event}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, } -} +); -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct Test; - -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); - type Origin = Origin; + type BlockWeights = (); + type BlockLength = (); type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); + type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; type Call = Call; @@ -58,33 +55,31 @@ impl frame_system::Trait for Test { type AccountId = AccountId; type Lookup = Indices; type Header = sp_runtime::testing::Header; - type Event = (); + type Event = Event; type BlockHashCount = (); - type MaximumBlockWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnKilledAccount = (Balances,); + type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: Balance = 10; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = Balance; - type Event = (); + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } -impl pallet_indices::Trait for Test { +impl pallet_indices::Config for Test { type AccountIndex = AccountIndex; - type Event = (); + type Event = Event; type Currency = Balances; type Deposit = (); type WeightInfo = (); @@ -92,13 +87,13 @@ impl pallet_indices::Trait for Test { parameter_types! 
{ pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -124,13 +119,13 @@ impl pallet_session::SessionHandler for TestSessionHandler { fn on_disabled(_: usize) {} } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions<(), ()>; type NextSessionRotation = pallet_session::PeriodicSessions<(), ()>; type SessionHandler = TestSessionHandler; - type Event = (); + type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; type DisabledValidatorsThreshold = (); @@ -161,12 +156,12 @@ impl frame_system::offchain::SendTransactionTypes for Test where type Extrinsic = Extrinsic; } -impl pallet_staking::Trait for Test { +impl pallet_staking::Config for Test { type Currency = Balances; type UnixTime = pallet_timestamp::Module; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type Event = (); + type Event = Event; type Slash = (); type Reward = (); type SessionsPerEra = (); diff --git a/frame/staking/fuzzer/src/submit_solution.rs b/frame/staking/fuzzer/src/submit_solution.rs index 4f85066f7f66a..d94ee49b96db4 100644 --- a/frame/staking/fuzzer/src/submit_solution.rs +++ b/frame/staking/fuzzer/src/submit_solution.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index 19f7e51b8f6ce..8713f5e1001cc 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-staking-reward-curve" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,10 +15,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.7", features = ["full", "visit"] } +syn = { version = "1.0.58", features = ["full", "visit"] } quote = "1.0.3" proc-macro2 = "1.0.6" proc-macro-crate = "0.1.4" [dev-dependencies] -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } diff --git a/frame/staking/reward-curve/src/lib.rs b/frame/staking/reward-curve/src/lib.rs index 275669fe26b3b..3a8d625e83575 100644 --- a/frame/staking/reward-curve/src/lib.rs +++ b/frame/staking/reward-curve/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/staking/reward-curve/tests/test.rs b/frame/staking/reward-curve/tests/test.rs index 45ad59e00ad27..fda7df145d0f3 100644 --- a/frame/staking/reward-curve/tests/test.rs +++ b/frame/staking/reward-curve/tests/test.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index d2f769b069435..beddc326b5109 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,6 +21,7 @@ use super::*; use crate::Module as Staking; use testing_utils::*; +use sp_npos_elections::CompactSolution; use sp_runtime::traits::One; use frame_system::RawOrigin; pub use frame_benchmarking::{benchmarks, account, whitelisted_caller, whitelist_account}; @@ -31,7 +32,7 @@ const MAX_SLASHES: u32 = 1000; // Add slashing spans to a user account. Not relevant for actual use, only to benchmark // read and write operations. -fn add_slashing_spans(who: &T::AccountId, spans: u32) { +fn add_slashing_spans(who: &T::AccountId, spans: u32) { if spans == 0 { return } // For the first slashing span, we initialize @@ -45,20 +46,24 @@ fn add_slashing_spans(who: &T::AccountId, spans: u32) { SlashingSpans::::insert(who, slashing_spans); } -// This function generates one validator being nominated by n nominators, and returns the validator -// stash account and the nominators' stash and controller. It also starts an era and creates pending payouts. -pub fn create_validator_with_nominators( +// This function clears all existing validators and nominators from the set, and generates one new +// validator being nominated by n nominators, and returns the validator stash account and the +// nominators' stash and controller. It also starts an era and creates pending payouts. +pub fn create_validator_with_nominators( n: u32, upper_bound: u32, dead: bool, destination: RewardDestination ) -> Result<(T::AccountId, Vec<(T::AccountId, T::AccountId)>), &'static str> { + // Clean up any existing state. + clear_validators_and_nominators::(); let mut points_total = 0; let mut points_individual = Vec::new(); let (v_stash, v_controller) = create_stash_controller::(0, 100, destination.clone())?; let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), + .. Default::default() }; Staking::::validate(RawOrigin::Signed(v_controller).into(), validator_prefs)?; let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); @@ -110,8 +115,6 @@ pub fn create_validator_with_nominators( const USER_SEED: u32 = 999666; benchmarks! { - _{} - bond { let stash = create_funded_user::("stash", USER_SEED, 100); let controller = create_funded_user::("controller", USER_SEED, 100); @@ -196,6 +199,61 @@ benchmarks! { assert!(Validators::::contains_key(stash)); } + kick { + // scenario: we want to kick `k` nominators from nominating us (we are a validator). + // we'll assume that `k` is under 128 for the purposes of determining the slope. 
+ // each nominator should have `MAX_NOMINATIONS` validators nominated, and our validator + // should be somewhere in there. + let k in 1 .. 128; + + // these are the other validators; there are `MAX_NOMINATIONS - 1` of them, so there are a + // total of `MAX_NOMINATIONS` validators in the system. + let rest_of_validators = create_validators::(MAX_NOMINATIONS as u32 - 1, 100)?; + + // this is the validator that will be kicking. + let (stash, controller) = create_stash_controller::(MAX_NOMINATIONS as u32 - 1, 100, Default::default())?; + let stash_lookup: ::Source = T::Lookup::unlookup(stash.clone()); + + // they start validating. + Staking::::validate(RawOrigin::Signed(controller.clone()).into(), Default::default())?; + + // we now create the nominators. there will be `k` of them; each will nominate all + // validators. we will then kick each of the `k` nominators from the main validator. + let mut nominator_stashes = Vec::with_capacity(k as usize); + for i in 0 .. k { + // create a nominator stash. + let (n_stash, n_controller) = create_stash_controller::(MAX_NOMINATIONS as u32 + i, 100, Default::default())?; + + // bake the nominations; we first clone them from the rest of the validators. + let mut nominations = rest_of_validators.clone(); + // then insert "our" validator somewhere in there (we vary it) to avoid accidental + // optimisations/pessimisations. + nominations.insert(i as usize % (nominations.len() + 1), stash_lookup.clone()); + // then we nominate. + Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), nominations)?; + + nominator_stashes.push(n_stash); + } + + // all nominators now should be nominating our validator... + for n in nominator_stashes.iter() { + assert!(Nominators::::get(n).unwrap().targets.contains(&stash)); + } + + // we need the unlookuped version of the nominator stash for the kick. + let kicks = nominator_stashes.iter() + .map(|n| T::Lookup::unlookup(n.clone())) + .collect::>(); + + whitelist_account!(controller); + }: _(RawOrigin::Signed(controller), kicks) + verify { + // all nominators now should *not* be nominating our validator... + for n in nominator_stashes.iter() { + assert!(!Nominators::::get(n).unwrap().targets.contains(&stash)); + } + } + // Worst case scenario, MAX_NOMINATIONS nominate { let n in 1 .. MAX_NOMINATIONS as u32; @@ -286,8 +344,6 @@ benchmarks! { payout_stakers_dead_controller { let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; - // Clean up existing validators - Validators::::remove_all(); let (validator, nominators) = create_validator_with_nominators::( n, T::MaxNominatorRewardedPerValidator::get() as u32, @@ -321,8 +377,6 @@ benchmarks! { payout_stakers_alive_staked { let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; - // Clean up existing validators - Validators::::remove_all(); let (validator, nominators) = create_validator_with_nominators::( n, T::MaxNominatorRewardedPerValidator::get() as u32, @@ -400,7 +454,7 @@ benchmarks! { let s in 1 .. MAX_SPANS; let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; add_slashing_spans::(&stash, s); - T::Currency::make_free_balance_be(&stash, 0u32.into()); + T::Currency::make_free_balance_be(&stash, T::Currency::minimum_balance()); whitelist_account!(controller); }: _(RawOrigin::Signed(controller), stash.clone(), s) verify { @@ -522,7 +576,12 @@ benchmarks! 
{ compact, score, size - ) = offchain_election::prepare_submission::(assignments, winners, false, T::MaximumBlockWeight::get()).unwrap(); + ) = offchain_election::prepare_submission::( + assignments, + winners, + false, + T::BlockWeights::get().max_block, + ).unwrap(); assert_eq!( winners.len(), compact.unique_targets().len(), @@ -590,7 +649,12 @@ benchmarks! { compact, score, size - ) = offchain_election::prepare_submission::(assignments, winners, false, T::MaximumBlockWeight::get()).unwrap(); + ) = offchain_election::prepare_submission::( + assignments, + winners, + false, + T::BlockWeights::get().max_block, + ).unwrap(); assert_eq!( winners.len(), compact.unique_targets().len(), @@ -708,7 +772,7 @@ mod tests { #[test] fn create_validators_with_nominators_for_era_works() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build().execute_with(|| { let v = 10; let n = 100; @@ -725,12 +789,12 @@ mod tests { #[test] fn create_validator_with_nominators_works() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build().execute_with(|| { let n = 10; let (validator_stash, nominators) = create_validator_with_nominators::( n, - ::MaxNominatorRewardedPerValidator::get() as u32, + ::MaxNominatorRewardedPerValidator::get() as u32, false, RewardDestination::Staked, ).unwrap(); @@ -749,12 +813,12 @@ mod tests { #[test] fn add_slashing_spans_works() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build().execute_with(|| { let n = 10; let (validator_stash, _nominators) = create_validator_with_nominators::( n, - ::MaxNominatorRewardedPerValidator::get() as u32, + ::MaxNominatorRewardedPerValidator::get() as u32, false, RewardDestination::Staked, ).unwrap(); @@ -780,7 +844,7 @@ mod tests { #[test] fn test_payout_all() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build().execute_with(|| { let v = 10; let n = 100; @@ -799,13 +863,14 @@ mod tests { #[test] fn test_benchmarks() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build().execute_with(|| { assert_ok!(test_benchmark_bond::()); assert_ok!(test_benchmark_bond_extra::()); assert_ok!(test_benchmark_unbond::()); assert_ok!(test_benchmark_withdraw_unbonded_update::()); assert_ok!(test_benchmark_withdraw_unbonded_kill::()); assert_ok!(test_benchmark_validate::()); + assert_ok!(test_benchmark_kick::()); assert_ok!(test_benchmark_nominate::()); assert_ok!(test_benchmark_chill::()); assert_ok!(test_benchmark_set_payee::()); diff --git a/frame/staking/src/inflation.rs b/frame/staking/src/inflation.rs index 2161fe20af829..bd9d1f8bbdb30 100644 --- a/frame/staking/src/inflation.rs +++ b/frame/staking/src/inflation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index fdea1c18e768b..3ea66e937e83c 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ //! //! The Staking module is used to manage funds at stake by network maintainers. //! -//! - [`staking::Trait`](./trait.Trait.html) +//! - [`staking::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -107,7 +107,7 @@ //! //! Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the //! `payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to the -//! validator as well as its nominators. Only the [`Trait::MaxNominatorRewardedPerValidator`] +//! validator as well as its nominators. Only the [`Config::MaxNominatorRewardedPerValidator`] //! biggest stakers can claim their reward. This is to limit the i/o cost to mutate storage for each //! nominator's account. //! @@ -154,10 +154,10 @@ //! use frame_system::ensure_signed; //! use pallet_staking::{self as staking}; //! -//! pub trait Trait: staking::Trait {} +//! pub trait Config: staking::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! /// Reward a validator. //! #[weight = 0] //! pub fn reward_myself(origin) -> dispatch::DispatchResult { @@ -175,7 +175,7 @@ //! ### Era payout //! //! The era payout is computed using yearly inflation curve defined at -//! [`T::RewardCurve`](./trait.Trait.html#associatedtype.RewardCurve) as such: +//! [`T::RewardCurve`](./trait.Config.html#associatedtype.RewardCurve) as such: //! //! ```nocompile //! staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year @@ -186,7 +186,7 @@ //! remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout //! ``` //! The remaining reward is send to the configurable end-point -//! [`T::RewardRemainder`](./trait.Trait.html#associatedtype.RewardRemainder). +//! [`T::RewardRemainder`](./trait.Config.html#associatedtype.RewardRemainder). //! //! ### Reward Calculation //! @@ -232,10 +232,11 @@ //! //! The controller account can free a portion (or all) of the funds using the //! [`unbond`](enum.Call.html#variant.unbond) call. Note that the funds are not immediately -//! accessible. Instead, a duration denoted by [`BondingDuration`](./struct.BondingDuration.html) -//! (in number of eras) must pass until the funds can actually be removed. Once the -//! `BondingDuration` is over, the [`withdraw_unbonded`](./enum.Call.html#variant.withdraw_unbonded) -//! call can be used to actually withdraw the funds. +//! accessible. Instead, a duration denoted by +//! [`BondingDuration`](./trait.Config.html#associatedtype.BondingDuration) (in number of eras) must +//! pass until the funds can actually be removed. Once the `BondingDuration` is over, the +//! [`withdraw_unbonded`](./enum.Call.html#variant.withdraw_unbonded) call can be used to actually +//! withdraw the funds. //! //! Note that there is a limitation to the number of fund-chunks that can be scheduled to be //! unlocked in the future via [`unbond`](enum.Call.html#variant.unbond). 
In case this maximum @@ -304,7 +305,7 @@ use frame_support::{ }; use pallet_session::historical; use sp_runtime::{ - Percent, Perbill, PerU16, PerThing, InnerOf, RuntimeDebug, DispatchError, + Percent, Perbill, PerU16, RuntimeDebug, DispatchError, curve::PiecewiseLinear, traits::{ Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, @@ -327,14 +328,14 @@ use frame_system::{ }; use sp_npos_elections::{ ExtendedBalance, Assignment, ElectionScore, ElectionResult as PrimitiveElectionResult, - build_support_map, evaluate_support, seq_phragmen, generate_solution_type, - is_score_better, VotingLimit, SupportMap, VoteWeight, + to_support_map, EvaluateSupport, seq_phragmen, generate_solution_type, is_score_better, + SupportMap, VoteWeight, CompactSolution, PerThing128, }; pub use weights::WeightInfo; const STAKING_ID: LockIdentifier = *b"staking "; pub const MAX_UNLOCKING_CHUNKS: usize = 32; -pub const MAX_NOMINATIONS: usize = ::LIMIT; +pub const MAX_NOMINATIONS: usize = ::LIMIT; pub(crate) const LOG_TARGET: &'static str = "staking"; @@ -385,12 +386,12 @@ pub type OffchainAccuracy = PerU16; /// The balance type of this module. pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; type PositiveImbalanceOf = - <::Currency as Currency<::AccountId>>::PositiveImbalance; + <::Currency as Currency<::AccountId>>::PositiveImbalance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; /// Information regarding the active era (era in used in session). #[derive(Encode, Decode, RuntimeDebug)] @@ -453,12 +454,17 @@ pub struct ValidatorPrefs { /// nominators. #[codec(compact)] pub commission: Perbill, + /// Whether or not this validator is accepting more nominations. If `true`, then no nominator + /// who is not already nominating this validator may nominate them. By default, validators + /// are accepting nominations. + pub blocked: bool, } impl Default for ValidatorPrefs { fn default() -> Self { ValidatorPrefs { commission: Default::default(), + blocked: false, } } } @@ -732,8 +738,8 @@ impl Default for ElectionStatus { /// Means for interacting with a specialized version of the `session` trait. /// -/// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Trait` -pub trait SessionInterface: frame_system::Trait { +/// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Config` +pub trait SessionInterface: frame_system::Config { /// Disable a given validator by stash ID. /// /// Returns `true` if new era should be forced at the end of this session. 
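The new `blocked` flag on `ValidatorPrefs` is the opt-out switch that the `kick` call (added further down in this diff) pairs with. A standalone mirror of the shape, just to show the default behaviour; `commission_parts` is a stand-in for the real `Perbill` field:

```rust
// Mirrors the upgraded preference struct: validators keep accepting
// nominations unless they explicitly set `blocked`.
#[derive(Debug, Default, PartialEq)]
struct ValidatorPrefsMirror {
    commission_parts: u32, // stands in for `Perbill`
    blocked: bool,
}

fn main() {
    // Matches the pallet's `impl Default`: `blocked` starts out `false`.
    assert!(!ValidatorPrefsMirror::default().blocked);

    // A validator that wants to curate its nominators first blocks new ones,
    // then kicks the unwanted stashes.
    let curated = ValidatorPrefsMirror { blocked: true, ..Default::default() };
    assert!(curated.blocked);
}
```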
@@ -746,22 +752,22 @@ pub trait SessionInterface: frame_system::Trait { fn prune_historical_up_to(up_to: SessionIndex); } -impl SessionInterface<::AccountId> for T where - T: pallet_session::Trait::AccountId>, - T: pallet_session::historical::Trait< - FullIdentification = Exposure<::AccountId, BalanceOf>, +impl SessionInterface<::AccountId> for T where + T: pallet_session::Config::AccountId>, + T: pallet_session::historical::Config< + FullIdentification = Exposure<::AccountId, BalanceOf>, FullIdentificationOf = ExposureOf, >, - T::SessionHandler: pallet_session::SessionHandler<::AccountId>, - T::SessionManager: pallet_session::SessionManager<::AccountId>, + T::SessionHandler: pallet_session::SessionHandler<::AccountId>, + T::SessionManager: pallet_session::SessionManager<::AccountId>, T::ValidatorIdOf: - Convert<::AccountId, Option<::AccountId>>, + Convert<::AccountId, Option<::AccountId>>, { - fn disable_validator(validator: &::AccountId) -> Result { + fn disable_validator(validator: &::AccountId) -> Result { >::disable(validator) } - fn validators() -> Vec<::AccountId> { + fn validators() -> Vec<::AccountId> { >::validators() } @@ -770,7 +776,7 @@ impl SessionInterface<::AccountId> for T whe } } -pub trait Trait: frame_system::Trait + SendTransactionTypes> { +pub trait Config: frame_system::Config + SendTransactionTypes> { /// The staking balance. type Currency: LockableCurrency; @@ -792,7 +798,7 @@ pub trait Trait: frame_system::Trait + SendTransactionTypes> { type RewardRemainder: OnUnbalanced>; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Handler for the unbalanced reduction when slashing a staker. type Slash: OnUnbalanced>; @@ -895,16 +901,17 @@ enum Releases { V2_0_0, V3_0_0, V4_0_0, + V5_0_0, } impl Default for Releases { fn default() -> Self { - Releases::V4_0_0 + Releases::V5_0_0 } } decl_storage! { - trait Store for Module as Staking { + trait Store for Module as Staking { /// Number of eras to keep in history. /// /// Information is kept for eras in `[current_era - history_depth; current_era]`. @@ -952,11 +959,14 @@ decl_storage! { /// The active era information, it holds index and start. /// - /// The active era is the era currently rewarded. - /// Validator set of this era must be equal to `SessionInterface::validators`. + /// The active era is the era being currently rewarded. Validator set of this era must be + /// equal to [`SessionInterface::validators`]. pub ActiveEra get(fn active_era): Option; /// The session index at which the era start for the last `HISTORY_DEPTH` eras. + /// + /// Note: This tracks the starting session (i.e. session index when era start being active) + /// for the eras in `[CurrentEra - HISTORY_DEPTH, CurrentEra]`. pub ErasStartSessionIndex get(fn eras_start_session_index): map hasher(twox_64_concat) EraIndex => Option; @@ -1083,8 +1093,8 @@ decl_storage! { /// True if network has been upgraded to this version. /// Storage version of the pallet. /// - /// This is set to v3.0.0 for new networks. - StorageVersion build(|_: &GenesisConfig| Releases::V4_0_0): Releases; + /// This is set to v5.0.0 for new networks. + StorageVersion build(|_: &GenesisConfig| Releases::V5_0_0): Releases; } add_extra_genesis { config(stakers): @@ -1120,8 +1130,31 @@ decl_storage! 
{ } } +pub mod migrations { + use super::*; + + #[derive(Decode)] + struct OldValidatorPrefs { + #[codec(compact)] + pub commission: Perbill + } + impl OldValidatorPrefs { + fn upgraded(self) -> ValidatorPrefs { + ValidatorPrefs { + commission: self.commission, + .. Default::default() + } + } + } + pub fn migrate_to_blockable() -> frame_support::weights::Weight { + Validators::::translate::(|_, p| Some(p.upgraded())); + ErasValidatorPrefs::::translate::(|_, _, p| Some(p.upgraded())); + T::BlockWeights::get().max_block + } +} + decl_event!( - pub enum Event where Balance = BalanceOf, ::AccountId { + pub enum Event where Balance = BalanceOf, ::AccountId { /// The era payout has been set; the first balance is the validator-payout; the second is /// the remainder from the maximum amount of reward. /// \[era_index, validator_payout, remainder\] @@ -1148,12 +1181,14 @@ decl_event!( /// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance` /// from the unlocking queue. \[stash, amount\] Withdrawn(AccountId, Balance), + /// A nominator has been kicked from a validator. \[nominator, stash\] + Kicked(AccountId, AccountId), } ); decl_error! { /// Error for the staking module. - pub enum Error for Module { + pub enum Error for Module { /// Not a controller account. NotController, /// Not a stash account. @@ -1219,11 +1254,17 @@ decl_error! { IncorrectHistoryDepth, /// Incorrect number of slashing spans provided. IncorrectSlashingSpans, + /// Internal state has become somehow corrupted and the operation cannot continue. + BadState, + /// Too many nomination targets supplied. + TooManyTargets, + /// A nomination target was supplied that was blocked or otherwise not a validator. + BadTarget, } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// Number of sessions per era. const SessionsPerEra: SessionIndex = T::SessionsPerEra::get(); @@ -1264,6 +1305,15 @@ decl_module! { fn deposit_event() = default; + fn on_runtime_upgrade() -> frame_support::weights::Weight { + if StorageVersion::get() == Releases::V4_0_0 { + StorageVersion::put(Releases::V5_0_0); + migrations::migrate_to_blockable::() + } else { + 0 + } + } + /// sets `ElectionStatus` to `Open(now)` where `now` is the block number at which the /// election window has opened, if we are at the last session and less blocks than /// `T::ElectionLookahead` is remaining until the next new session schedule. The offchain @@ -1420,13 +1470,13 @@ decl_module! { Err(Error::::InsufficientValue)? } + system::Module::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; + // You're auto-bonded forever, here. We might improve this by only bonding when // you actually validate/nominate and remove once you unbond __everything__. >::insert(&stash, &controller); >::insert(&stash, payee); - system::Module::::inc_ref(&stash); - let current_era = CurrentEra::get().unwrap_or(0); let history_depth = Self::history_depth(); let last_reward_era = current_era.saturating_sub(history_depth); @@ -1474,11 +1524,13 @@ decl_module! { let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash_balance = T::Currency::free_balance(&stash); - if let Some(extra) = stash_balance.checked_sub(&ledger.total) { let extra = extra.min(max_additional); ledger.total += extra; ledger.active += extra; + // last check: the new active amount of ledger must be more than ED. 
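Stepping back to the `on_runtime_upgrade` hook and `migrations::migrate_to_blockable` introduced in this hunk: the hook only fires the migration while the stored version is still the previous release. A standalone sketch of that gate with made-up weight numbers; the real migration re-encodes `ValidatorPrefs` via `translate` so the new `blocked` field picks up its default, and it returns `max_block` as its weight:

```rust
// Standalone sketch of the one-shot gate in `on_runtime_upgrade`: the migration
// runs only while the stored version is still the previous release, and the
// hook reports the weight it consumed (0 when nothing ran).
#[derive(PartialEq)]
enum Releases { V4, V5 } // stands in for V4_0_0 / V5_0_0

struct MockPallet { version: Releases, migrations_run: u32 }

impl MockPallet {
    fn migrate_to_blockable(&mut self) -> u64 {
        self.migrations_run += 1;
        1_000 // pretend weight; the pallet returns `max_block`
    }
    fn on_runtime_upgrade(&mut self) -> u64 {
        if self.version == Releases::V4 {
            self.version = Releases::V5;
            self.migrate_to_blockable()
        } else {
            0
        }
    }
}

fn main() {
    let mut pallet = MockPallet { version: Releases::V4, migrations_run: 0 };
    assert_eq!(pallet.on_runtime_upgrade(), 1_000); // upgrades and migrates once
    assert_eq!(pallet.on_runtime_upgrade(), 0);     // idempotent afterwards
    assert_eq!(pallet.migrations_run, 1);
}
```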
+ ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); + Self::deposit_event(RawEvent::Bonded(stash, extra)); Self::update_ledger(&controller, &ledger); } @@ -1586,7 +1638,7 @@ decl_module! { ledger = ledger.consolidate_unlocked(current_era) } - let post_info_weight = if ledger.unlocking.is_empty() && ledger.active.is_zero() { + let post_info_weight = if ledger.unlocking.is_empty() && ledger.active <= T::Currency::minimum_balance() { // This account must have called `unbond()` with some value that caused the active // portion to fall below existential deposit + will have no more unlocking chunks // left. We can now safely remove all staking-related information. @@ -1667,9 +1719,17 @@ decl_module! { let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; ensure!(!targets.is_empty(), Error::::EmptyTargets); + ensure!(targets.len() <= MAX_NOMINATIONS, Error::::TooManyTargets); + + let old = Nominators::::get(stash).map_or_else(Vec::new, |x| x.targets); + let targets = targets.into_iter() - .take(MAX_NOMINATIONS) - .map(|t| T::Lookup::lookup(t)) + .map(|t| T::Lookup::lookup(t).map_err(DispatchError::from)) + .map(|n| n.and_then(|n| if old.contains(&n) || !Validators::::get(&n).blocked { + Ok(n) + } else { + Err(Error::::BadTarget.into()) + })) .collect::, _>>()?; let nominations = Nominations { @@ -1973,6 +2033,9 @@ decl_module! { ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); let ledger = ledger.rebond(value); + // last check: the new active amount of ledger must be more than ED. + ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); + Self::update_ledger(&controller, &ledger); Ok(Some( 35 * WEIGHT_PER_MICROS @@ -2020,9 +2083,9 @@ decl_module! { } } - /// Remove all data structure concerning a staker/stash once its balance is zero. + /// Remove all data structure concerning a staker/stash once its balance is at the minimum. /// This is essentially equivalent to `withdraw_unbonded` except it can be called by anyone - /// and the target `stash` must have no funds left. + /// and the target `stash` must have no funds left beyond the ED. /// /// This can be called from any origin. /// @@ -2037,7 +2100,8 @@ decl_module! { /// # #[weight = T::WeightInfo::reap_stash(*num_slashing_spans)] fn reap_stash(_origin, stash: T::AccountId, num_slashing_spans: u32) { - ensure!(T::Currency::total_balance(&stash).is_zero(), Error::::FundedTarget); + let at_minimum = T::Currency::total_balance(&stash) == T::Currency::minimum_balance(); + ensure!(at_minimum, Error::::FundedTarget); Self::kill_stash(&stash, num_slashing_spans)?; T::Currency::remove_lock(STAKING_ID, &stash); } @@ -2094,7 +2158,7 @@ decl_module! { #[weight = T::WeightInfo::submit_solution_better( size.validators.into(), size.nominators.into(), - compact.len() as u32, + compact.voter_count() as u32, winners.len() as u32, )] pub fn submit_election_solution( @@ -2128,7 +2192,7 @@ decl_module! { #[weight = T::WeightInfo::submit_solution_better( size.validators.into(), size.nominators.into(), - compact.len() as u32, + compact.voter_count() as u32, winners.len() as u32, )] pub fn submit_election_solution_unsigned( @@ -2156,17 +2220,53 @@ decl_module! { Ok(adjustments) } + + /// Remove the given nominations from the calling validator. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. 
+ /// And, it can be only called when [`EraElectionStatus`] is `Closed`. The controller + /// account should represent a validator. + /// + /// - `who`: A list of nominator stash accounts who are nominating this validator which + /// should no longer be nominating this validator. + /// + /// Note: Making this call only makes sense if you first set the validator preferences to + /// block any further nominations. + #[weight = T::WeightInfo::kick(who.len() as u32)] + pub fn kick(origin, who: Vec<::Source>) -> DispatchResult { + let controller = ensure_signed(origin)?; + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let stash = &ledger.stash; + + for nom_stash in who.into_iter() + .map(T::Lookup::lookup) + .collect::, _>>()? + .into_iter() + { + Nominators::::mutate(&nom_stash, |maybe_nom| if let Some(ref mut nom) = maybe_nom { + if let Some(pos) = nom.targets.iter().position(|v| v == stash) { + nom.targets.swap_remove(pos); + Self::deposit_event(RawEvent::Kicked(nom_stash.clone(), stash.clone())); + } + }); + } + + Ok(()) + } } } -impl Module { +impl Module { /// The total balance that can be slashed from a stash account as of right now. pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { // Weight note: consider making the stake accessible through stash. Self::bonded(stash).and_then(Self::ledger).map(|l| l.active).unwrap_or_default() } - /// Internal impl of [`slashable_balance_of`] that returns [`VoteWeight`]. + /// Internal impl of [`Self::slashable_balance_of`] that returns [`VoteWeight`]. pub fn slashable_balance_of_vote_weight(stash: &T::AccountId, issuance: BalanceOf) -> VoteWeight { T::CurrencyToVote::to_vote(Self::slashable_balance_of(stash), issuance) } @@ -2590,13 +2690,11 @@ impl Module { ); // build the support map thereof in order to evaluate. - let supports = build_support_map::( - &winners, - &staked_assignments, - ).map_err(|_| Error::::OffchainElectionBogusEdge)?; + let supports = to_support_map::(&winners, &staked_assignments) + .map_err(|_| Error::::OffchainElectionBogusEdge)?; // Check if the score is the same as the claimed one. - let submitted_score = evaluate_support(&supports); + let submitted_score = (&supports).evaluate(); ensure!(submitted_score == claimed_score, Error::::OffchainElectionBogusScore); // At last, alles Ok. Exposures and store the result. @@ -2625,14 +2723,17 @@ impl Module { /// Start a session potentially starting an era. fn start_session(start_session: SessionIndex) { let next_active_era = Self::active_era().map(|e| e.index + 1).unwrap_or(0); + // This is only `Some` when current era has already progressed to the next era, while the + // active era is one behind (i.e. in the *last session of the active era*, or *first session + // of the new current era*, depending on how you look at it). if let Some(next_active_era_start_session_index) = Self::eras_start_session_index(next_active_era) { if next_active_era_start_session_index == start_session { Self::start_era(start_session); } else if next_active_era_start_session_index < start_session { - // This arm should never happen, but better handle it than to stall the - // staking pallet. + // This arm should never happen, but better handle it than to stall the staking + // pallet. frame_support::print("Warning: A session appears to have been skipped."); Self::start_era(start_session); } @@ -2833,7 +2934,7 @@ impl Module { /// Execute election and return the new results. 
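The body of `kick` shown above boils down to locating the calling validator's stash in each named nominator's target list and dropping it. A standalone sketch of that removal step, using `swap_remove` since target order carries no meaning:

```rust
// Minimal sketch of the per-nominator removal inside `kick`.
fn kick_target(targets: &mut Vec<u64>, validator_stash: u64) -> bool {
    if let Some(pos) = targets.iter().position(|v| *v == validator_stash) {
        targets.swap_remove(pos);
        true // the pallet deposits a `Kicked(nominator, stash)` event here
    } else {
        false // not nominating this validator: nothing to do, no event
    }
}

fn main() {
    let mut targets = vec![11, 21, 31];
    assert!(kick_target(&mut targets, 21));
    assert_eq!(targets, vec![11, 31]); // order of the remainder may change
    assert!(!kick_target(&mut targets, 41));
}
```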
The edge weights are processed into support /// values. /// - /// This is basically a wrapper around [`do_phragmen`] which translates + /// This is basically a wrapper around [`Self::do_phragmen`] which translates /// `PrimitiveElectionResult` into `ElectionResult`. /// /// No storage item is updated. @@ -2849,7 +2950,7 @@ impl Module { Self::slashable_balance_of_fn(), ); - let supports = build_support_map::( + let supports = to_support_map::( &elected_stashes, &staked_assignments, ) @@ -2888,10 +2989,9 @@ impl Module { /// Self votes are added and nominations before the most recent slashing span are ignored. /// /// No storage item is updated. - pub fn do_phragmen(iterations: usize) - -> Option> - where ExtendedBalance: From> - { + pub fn do_phragmen( + iterations: usize, + ) -> Option> { let weight_of = Self::slashable_balance_of_fn(); let mut all_nominators: Vec<(T::AccountId, VoteWeight, Vec)> = Vec::new(); let mut all_validators = Vec::new(); @@ -2923,7 +3023,11 @@ impl Module { if all_validators.len() < Self::minimum_validator_count().max(1) as usize { // If we don't have enough candidates, nothing to do. - log!(error, "💸 Chain does not have enough staking candidates to operate. Era {:?}.", Self::current_era()); + log!( + warn, + "💸 Chain does not have enough staking candidates to operate. Era {:?}.", + Self::current_era() + ); None } else { seq_phragmen::<_, Accuracy>( @@ -2932,7 +3036,7 @@ impl Module { all_nominators, Some((iterations, 0)), // exactly run `iterations` rounds. ) - .map_err(|err| log!(error, "Call to seq-phragmen failed due to {}", err)) + .map_err(|err| log!(error, "Call to seq-phragmen failed due to {:?}", err)) .ok() } } @@ -2990,7 +3094,7 @@ impl Module { >::remove(stash); >::remove(stash); - system::Module::::dec_ref(stash); + system::Module::::dec_consumers(stash); Ok(()) } @@ -3083,19 +3187,37 @@ impl Module { /// /// Once the first new_session is planned, all session must start and then end in order, though /// some session can lag in between the newest session planned and the latest session started. -impl pallet_session::SessionManager for Module { +impl pallet_session::SessionManager for Module { fn new_session(new_index: SessionIndex) -> Option> { + frame_support::debug::native::trace!( + target: LOG_TARGET, + "[{}] planning new_session({})", + >::block_number(), + new_index + ); Self::new_session(new_index) } fn start_session(start_index: SessionIndex) { + frame_support::debug::native::trace!( + target: LOG_TARGET, + "[{}] starting start_session({})", + >::block_number(), + start_index + ); Self::start_session(start_index) } fn end_session(end_index: SessionIndex) { + frame_support::debug::native::trace!( + target: LOG_TARGET, + "[{}] ending end_session({})", + >::block_number(), + end_index + ); Self::end_session(end_index) } } -impl historical::SessionManager>> for Module { +impl historical::SessionManager>> for Module { fn new_session(new_index: SessionIndex) -> Option>)>> { @@ -3124,7 +3246,7 @@ impl historical::SessionManager pallet_authorship::EventHandler for Module where - T: Trait + pallet_authorship::Trait + pallet_session::Trait + T: Config + pallet_authorship::Config + pallet_session::Config { fn note_author(author: T::AccountId) { Self::reward_by_ids(vec![(author, 20)]) @@ -3141,7 +3263,7 @@ impl pallet_authorship::EventHandler for Module /// if any. 
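On the `inc_consumers`/`dec_consumers` switch visible in this hunk: bonding now registers the stash as having an active consumer so the account cannot be reaped while staked, and `kill_stash` releases that reference. A standalone mirror of the bookkeeping; the error string is only a placeholder for whatever failure `frame_system` reports, which the pallet maps to `Error::<T>::BadState`:

```rust
// Toy consumer-reference counter mirroring the system-pallet bookkeeping.
#[derive(Default)]
struct Account {
    consumers: u32,
}

impl Account {
    // Called when bonding; the real call can fail, hence the Result.
    fn inc_consumers(&mut self) -> Result<(), &'static str> {
        self.consumers = self.consumers.checked_add(1).ok_or("bad state")?;
        Ok(())
    }
    // Called from `kill_stash` once all staking data is gone.
    fn dec_consumers(&mut self) {
        self.consumers = self.consumers.saturating_sub(1);
    }
}

fn main() {
    let mut stash = Account::default();
    stash.inc_consumers().expect("bond registers a consumer");
    assert_eq!(stash.consumers, 1);
    stash.dec_consumers();
    assert_eq!(stash.consumers, 0);
}
```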
pub struct StashOf(sp_std::marker::PhantomData); -impl Convert> for StashOf { +impl Convert> for StashOf { fn convert(controller: T::AccountId) -> Option { >::ledger(&controller).map(|l| l.stash) } @@ -3154,7 +3276,7 @@ impl Convert> for StashOf { /// `active_era`. It can differ from the latest planned exposure in `current_era`. pub struct ExposureOf(sp_std::marker::PhantomData); -impl Convert>>> +impl Convert>>> for ExposureOf { fn convert(validator: T::AccountId) -> Option>> { @@ -3167,19 +3289,19 @@ impl Convert> } /// This is intended to be used with `FilterHistoricalOffences`. -impl +impl OnOffenceHandler, Weight> for Module where - T: pallet_session::Trait::AccountId>, - T: pallet_session::historical::Trait< - FullIdentification = Exposure<::AccountId, BalanceOf>, + T: pallet_session::Config::AccountId>, + T: pallet_session::historical::Config< + FullIdentification = Exposure<::AccountId, BalanceOf>, FullIdentificationOf = ExposureOf, >, - T::SessionHandler: pallet_session::SessionHandler<::AccountId>, - T::SessionManager: pallet_session::SessionManager<::AccountId>, + T::SessionHandler: pallet_session::SessionHandler<::AccountId>, + T::SessionManager: pallet_session::SessionManager<::AccountId>, T::ValidatorIdOf: Convert< - ::AccountId, - Option<::AccountId>, + ::AccountId, + Option<::AccountId>, >, { fn on_offence( @@ -3310,7 +3432,7 @@ pub struct FilterHistoricalOffences { impl ReportOffence for FilterHistoricalOffences, R> where - T: Trait, + T: Config, R: ReportOffence, O: Offence, { @@ -3335,7 +3457,7 @@ impl ReportOffence } #[allow(deprecated)] -impl frame_support::unsigned::ValidateUnsigned for Module { +impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::submit_election_solution_unsigned( diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 055ebb9730805..0eb77e7c14ac1 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,16 +18,17 @@ //! Test utilities use crate::*; +use crate as staking; use frame_support::{ - assert_ok, impl_outer_dispatch, impl_outer_event, impl_outer_origin, parameter_types, - traits::{Currency, FindAuthor, Get, OnFinalize, OnInitialize}, + assert_ok, parameter_types, + traits::{Currency, FindAuthor, Get, OnFinalize, OnInitialize, OneSessionHandler}, weights::{constants::RocksDbWeight, Weight}, IterableStorageMap, StorageDoubleMap, StorageMap, StorageValue, }; use sp_core::H256; use sp_io; use sp_npos_elections::{ - build_support_map, evaluate_support, reduce, ExtendedBalance, StakedAssignment, ElectionScore, + to_support_map, EvaluateSupport, reduce, ExtendedBalance, StakedAssignment, ElectionScore, }; use sp_runtime::{ curve::PiecewiseLinear, @@ -38,6 +39,7 @@ use sp_staking::offence::{OffenceDetails, OnOffenceHandler}; use std::{cell::RefCell, collections::HashSet}; pub const INIT_TIMESTAMP: u64 = 30_000; +pub const BLOCK_TIME: u64 = 1000; /// The AccountId alias in this test module. pub(crate) type AccountId = u64; @@ -47,17 +49,11 @@ pub(crate) type Balance = u128; thread_local! 
{ static SESSION: RefCell<(Vec, HashSet)> = RefCell::new(Default::default()); - static SESSION_PER_ERA: RefCell = RefCell::new(3); - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); - static SLASH_DEFER_DURATION: RefCell = RefCell::new(0); - static ELECTION_LOOKAHEAD: RefCell = RefCell::new(0); - static PERIOD: RefCell = RefCell::new(1); - static MAX_ITERATIONS: RefCell = RefCell::new(0); } /// Another session handler struct to test on_disabled. pub struct OtherSessionHandler; -impl pallet_session::OneSessionHandler for OtherSessionHandler { +impl OneSessionHandler for OtherSessionHandler { type Key = UintAuthorityId; fn on_genesis_session<'a, I: 'a>(_: I) @@ -92,79 +88,22 @@ pub fn is_disabled(controller: AccountId) -> bool { SESSION.with(|d| d.borrow().1.contains(&stash)) } -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> Balance { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) - } -} - -pub struct SessionsPerEra; -impl Get for SessionsPerEra { - fn get() -> SessionIndex { - SESSION_PER_ERA.with(|v| *v.borrow()) - } -} -impl Get for SessionsPerEra { - fn get() -> BlockNumber { - SESSION_PER_ERA.with(|v| *v.borrow() as BlockNumber) - } -} - -pub struct ElectionLookahead; -impl Get for ElectionLookahead { - fn get() -> BlockNumber { - ELECTION_LOOKAHEAD.with(|v| *v.borrow()) - } -} - -pub struct Period; -impl Get for Period { - fn get() -> BlockNumber { - PERIOD.with(|v| *v.borrow()) - } -} - -pub struct SlashDeferDuration; -impl Get for SlashDeferDuration { - fn get() -> EraIndex { - SLASH_DEFER_DURATION.with(|v| *v.borrow()) - } -} - -pub struct MaxIterations; -impl Get for MaxIterations { - fn get() -> u32 { - MAX_ITERATIONS.with(|v| *v.borrow()) - } -} - -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} - -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - staking::Staking, - } -} - -mod staking { - // Re-export needed for `impl_outer_event!`. - pub use super::super::*; -} -use frame_system as system; -use pallet_balances as balances; -use pallet_session as session; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -impl_outer_event! { - pub enum MetaEvent for Test { - system, - balances, - session, - staking, +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Staking: staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, } -} +); /// Author of block is always 11 pub struct Author11; @@ -176,19 +115,27 @@ impl FindAuthor for Author11 { } } -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct Test; - parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = frame_support::weights::constants::WEIGHT_PER_SECOND * 2; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::constants::WEIGHT_PER_SECOND * 2 + ); pub const MaxLocks: u32 = 1024; + pub static SessionsPerEra: SessionIndex = 3; + pub static ExistentialDeposit: Balance = 1; + pub static SlashDeferDuration: EraIndex = 0; + pub static ElectionLookahead: BlockNumber = 0; + pub static Period: BlockNumber = 5; + pub static Offset: BlockNumber = 0; + pub static MaxIterations: u32 = 0; } -impl frame_system::Trait for Test { + +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = RocksDbWeight; type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -198,33 +145,26 @@ impl frame_system::Trait for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type Header = Header; - type Event = MetaEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = MaxLocks; type Balance = Balance; - type Event = MetaEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } parameter_types! { - pub const Offset: BlockNumber = 0; pub const UncleGenerations: u64 = 0; pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(25); } @@ -233,12 +173,12 @@ sp_runtime::impl_opaque_keys! { pub other: OtherSessionHandler, } } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions; type SessionHandler = (OtherSessionHandler,); - type Event = MetaEvent; + type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = crate::StashOf; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; @@ -246,11 +186,11 @@ impl pallet_session::Trait for Test { type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = crate::Exposure; type FullIdentificationOf = crate::ExposureOf; } -impl pallet_authorship::Trait for Test { +impl pallet_authorship::Config for Test { type FindAuthor = Author11; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -259,7 +199,7 @@ impl pallet_authorship::Trait for Test { parameter_types! 
{ pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -281,7 +221,7 @@ parameter_types! { pub const MaxNominatorRewardedPerValidator: u32 = 64; pub const UnsignedPriority: u64 = 1 << 20; pub const MinSolutionScoreBump: Perbill = Perbill::zero(); - pub const OffchainSolutionWeightLimit: Weight = MaximumBlockWeight::get(); + pub OffchainSolutionWeightLimit: Weight = BlockWeights::get().max_block; } thread_local! { @@ -299,12 +239,12 @@ impl OnUnbalanced> for RewardRemainderMock { } } -impl Trait for Test { +impl Config for Test { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = RewardRemainderMock; - type Event = MetaEvent; + type Event = Event; type Slash = (); type Reward = (); type SessionsPerEra = SessionsPerEra; @@ -335,46 +275,36 @@ where pub type Extrinsic = TestXt; pub struct ExtBuilder { - session_length: BlockNumber, - election_lookahead: BlockNumber, - session_per_era: SessionIndex, - existential_deposit: Balance, validator_pool: bool, nominate: bool, validator_count: u32, minimum_validator_count: u32, - slash_defer_duration: EraIndex, fair: bool, num_validators: Option, invulnerables: Vec, has_stakers: bool, - max_offchain_iterations: u32, + initialize_first_session: bool, } impl Default for ExtBuilder { fn default() -> Self { Self { - session_length: 1, - election_lookahead: 0, - session_per_era: 3, - existential_deposit: 1, validator_pool: false, nominate: true, validator_count: 2, minimum_validator_count: 0, - slash_defer_duration: 0, fair: true, num_validators: None, invulnerables: vec![], has_stakers: true, - max_offchain_iterations: 0, + initialize_first_session: true, } } } impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: Balance) -> Self { - self.existential_deposit = existential_deposit; + pub fn existential_deposit(self, existential_deposit: Balance) -> Self { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = existential_deposit); self } pub fn validator_pool(mut self, validator_pool: bool) -> Self { @@ -393,8 +323,8 @@ impl ExtBuilder { self.minimum_validator_count = count; self } - pub fn slash_defer_duration(mut self, eras: EraIndex) -> Self { - self.slash_defer_duration = eras; + pub fn slash_defer_duration(self, eras: EraIndex) -> Self { + SLASH_DEFER_DURATION.with(|v| *v.borrow_mut() = eras); self } pub fn fair(mut self, is_fair: bool) -> Self { @@ -409,52 +339,51 @@ impl ExtBuilder { self.invulnerables = invulnerables; self } - pub fn session_per_era(mut self, length: SessionIndex) -> Self { - self.session_per_era = length; + pub fn session_per_era(self, length: SessionIndex) -> Self { + SESSIONS_PER_ERA.with(|v| *v.borrow_mut() = length); self } - pub fn election_lookahead(mut self, look: BlockNumber) -> Self { - self.election_lookahead = look; + pub fn election_lookahead(self, look: BlockNumber) -> Self { + ELECTION_LOOKAHEAD.with(|v| *v.borrow_mut() = look); self } - pub fn session_length(mut self, length: BlockNumber) -> Self { - self.session_length = length; + pub fn period(self, length: BlockNumber) -> Self { + PERIOD.with(|v| *v.borrow_mut() = length); self } pub fn has_stakers(mut self, has: bool) -> Self { self.has_stakers = has; self } - pub fn max_offchain_iterations(mut self, iterations: u32) -> Self { - self.max_offchain_iterations = iterations; + pub fn 
max_offchain_iterations(self, iterations: u32) -> Self { + MAX_ITERATIONS.with(|v| *v.borrow_mut() = iterations); self } pub fn offchain_election_ext(self) -> Self { - self.session_per_era(4) - .session_length(5) - .election_lookahead(3) - } - pub fn set_associated_constants(&self) { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - SLASH_DEFER_DURATION.with(|v| *v.borrow_mut() = self.slash_defer_duration); - SESSION_PER_ERA.with(|v| *v.borrow_mut() = self.session_per_era); - ELECTION_LOOKAHEAD.with(|v| *v.borrow_mut() = self.election_lookahead); - PERIOD.with(|v| *v.borrow_mut() = self.session_length); - MAX_ITERATIONS.with(|v| *v.borrow_mut() = self.max_offchain_iterations); + self.session_per_era(4).period(5).election_lookahead(3) + } + pub fn initialize_first_session(mut self, init: bool) -> Self { + self.initialize_first_session = init; + self + } + pub fn offset(self, offset: BlockNumber) -> Self { + OFFSET.with(|v| *v.borrow_mut() = offset); + self } pub fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); - self.set_associated_constants(); let mut storage = frame_system::GenesisConfig::default() .build_storage::() .unwrap(); - let balance_factor = if self.existential_deposit > 1 { + let balance_factor = if ExistentialDeposit::get() > 1 { 256 } else { 1 }; let num_validators = self.num_validators.unwrap_or(self.validator_count); + // Check that the number of validators is sensible. + assert!(num_validators <= 8); let validators = (0..num_validators) .map(|x| ((x + 1) * 10 + 1) as AccountId) .collect::>(); @@ -473,6 +402,14 @@ impl ExtBuilder { (31, balance_factor * 2000), (40, balance_factor), (41, balance_factor * 2000), + (50, balance_factor), + (51, balance_factor * 2000), + (60, balance_factor), + (61, balance_factor * 2000), + (70, balance_factor), + (71, balance_factor * 2000), + (80, balance_factor), + (81, balance_factor * 2000), (100, 2000 * balance_factor), (101, 2000 * balance_factor), // This allows us to have a total_payout different from 0. @@ -500,7 +437,7 @@ impl ExtBuilder { (101, 100, balance_factor * 500, StakerStatus::::Nominator(nominated)) ]; } - let _ = GenesisConfig::{ + let _ = staking::GenesisConfig::{ stakers: stakers, validator_count: self.validator_count, minimum_validator_count: self.minimum_validator_count, @@ -524,13 +461,17 @@ impl ExtBuilder { SESSION.with(|x| *x.borrow_mut() = (validators.clone(), HashSet::new())); }); - // We consider all test to start after timestamp is initialized - // This must be ensured by having `timestamp::on_initialize` called before - // `staking::on_initialize` - ext.execute_with(|| { - System::set_block_number(1); - Timestamp::set_timestamp(INIT_TIMESTAMP); - }); + if self.initialize_first_session { + // We consider all test to start after timestamp is initialized This must be ensured by + // having `timestamp::on_initialize` called before `staking::on_initialize`. Also, if + // session length is 1, then it is already triggered. 
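The builder methods above now mutate thread-local values backing the mock's `pub static` parameters, instead of carrying the settings in `ExtBuilder` fields. A standalone mirror of what that pattern boils down to, with `u128` standing in for `Balance`; the real cell and the `Get` implementation are generated by `parameter_types!`:

```rust
use std::cell::RefCell;

thread_local! {
    // Roughly what `pub static ExistentialDeposit: Balance = 1;` expands to.
    static EXISTENTIAL_DEPOSIT: RefCell<u128> = RefCell::new(1);
}

struct ExistentialDeposit;
impl ExistentialDeposit {
    // The pallet-facing accessor (the macro implements `Get` for this type).
    fn get() -> u128 {
        EXISTENTIAL_DEPOSIT.with(|v| *v.borrow())
    }
}

fn main() {
    assert_eq!(ExistentialDeposit::get(), 1);
    // What `ExtBuilder::existential_deposit(256)` does under the hood.
    EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = 256);
    assert_eq!(ExistentialDeposit::get(), 256);
}
```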
+ ext.execute_with(|| { + System::set_block_number(1); + Session::on_initialize(1); + Staking::on_initialize(1); + Timestamp::set_timestamp(INIT_TIMESTAMP); + }); + } ext } @@ -541,26 +482,12 @@ impl ExtBuilder { } } -pub type System = frame_system::Module; -pub type Balances = pallet_balances::Module; -pub type Session = pallet_session::Module; -pub type Timestamp = pallet_timestamp::Module; -pub type Staking = Module; - -pub(crate) fn current_era() -> EraIndex { - Staking::current_era().unwrap() -} - fn post_conditions() { check_nominators(); check_exposures(); check_ledgers(); } -pub(crate) fn active_era() -> EraIndex { - Staking::active_era().unwrap().index -} - fn check_ledgers() { // check the ledger of all stakers. Bonded::::iter().for_each(|(_, ctrl)| assert_ledger_consistent(ctrl)) @@ -628,8 +555,26 @@ fn assert_is_stash(acc: AccountId) { fn assert_ledger_consistent(ctrl: AccountId) { // ensures ledger.total == ledger.active + sum(ledger.unlocking). let ledger = Staking::ledger(ctrl).expect("Not a controller."); - let real_total: Balance = ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); + let real_total: Balance = ledger + .unlocking + .iter() + .fold(ledger.active, |a, c| a + c.value); assert_eq!(real_total, ledger.total); + assert!( + ledger.active >= Balances::minimum_balance() || ledger.active == 0, + "{}: active ledger amount ({}) must be greater than ED {}", + ctrl, + ledger.active, + Balances::minimum_balance() + ); +} + +pub(crate) fn active_era() -> EraIndex { + Staking::active_era().unwrap().index +} + +pub(crate) fn current_era() -> EraIndex { + Staking::current_era().unwrap() } pub(crate) fn bond_validator(stash: AccountId, ctrl: AccountId, val: Balance) { @@ -664,56 +609,102 @@ pub(crate) fn bond_nominator( assert_ok!(Staking::nominate(Origin::signed(ctrl), target)); } +/// Progress to the given block, triggering session and era changes as we progress. +/// +/// This will finalize the previous block, initialize up to the given block, essentially simulating +/// a block import/propose process where we first initialize the block, then execute some stuff (not +/// in the function), and then finalize the block. pub(crate) fn run_to_block(n: BlockNumber) { Staking::on_finalize(System::block_number()); - for b in System::block_number() + 1..=n { + for b in (System::block_number() + 1)..=n { System::set_block_number(b); Session::on_initialize(b); Staking::on_initialize(b); + Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); if b != n { Staking::on_finalize(System::block_number()); } } } +/// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`. +pub(crate) fn start_session(session_index: SessionIndex) { + let end: u64 = if Offset::get().is_zero() { + (session_index as u64) * Period::get() + } else { + Offset::get() + (session_index.saturating_sub(1) as u64) * Period::get() + }; + run_to_block(end); + // session must have progressed properly. + assert_eq!( + Session::current_index(), + session_index, + "current session index = {}, expected = {}", + Session::current_index(), + session_index, + ); +} + +/// Go one session forward. 
pub(crate) fn advance_session() { let current_index = Session::current_index(); start_session(current_index + 1); } -pub(crate) fn start_session(session_index: SessionIndex) { - assert_eq!(>::get(), 1, "start_session can only be used with session length 1."); - for i in Session::current_index()..session_index { - Staking::on_finalize(System::block_number()); - System::set_block_number((i + 1).into()); - Timestamp::set_timestamp(System::block_number() * 1000 + INIT_TIMESTAMP); - Session::on_initialize(System::block_number()); - Staking::on_initialize(System::block_number()); - } - - assert_eq!(Session::current_index(), session_index); -} - -// This start and activate the era given. -// Because the mock use pallet-session which delays session by one, this will be one session after -// the election happened, not the first session after the election has happened. -pub(crate) fn start_era(era_index: EraIndex) { +/// Progress until the given era. +pub(crate) fn start_active_era(era_index: EraIndex) { start_session((era_index * >::get()).into()); - assert_eq!(Staking::current_era().unwrap(), era_index); - assert_eq!(Staking::active_era().unwrap().index, era_index); + assert_eq!(active_era(), era_index); + // One way or another, current_era must have changed before the active era, so they must match + // at this point. + assert_eq!(current_era(), active_era()); } pub(crate) fn current_total_payout_for_duration(duration: u64) -> Balance { + let reward = inflation::compute_total_payout( + ::RewardCurve::get(), + Staking::eras_total_stake(active_era()), + Balances::total_issuance(), + duration, + ) + .0; + assert!(reward > 0); + reward +} + +pub(crate) fn maximum_payout_for_duration(duration: u64) -> Balance { inflation::compute_total_payout( - ::RewardCurve::get(), - Staking::eras_total_stake(Staking::active_era().unwrap().index), + ::RewardCurve::get(), + 0, Balances::total_issuance(), duration, - ).0 + ) + .1 +} + +/// Time it takes to finish a session. +/// +/// Note, if you see `time_per_session() - BLOCK_TIME`, it is fine. This is because we set the +/// timestamp after on_initialize, so the timestamp is always one block old. +pub(crate) fn time_per_session() -> u64 { + Period::get() * BLOCK_TIME +} + +/// Time it takes to finish an era. +/// +/// Note, if you see `time_per_era() - BLOCK_TIME`, it is fine. This is because we set the +/// timestamp after on_initialize, so the timestamp is always one block old. +pub(crate) fn time_per_era() -> u64 { + time_per_session() * SessionsPerEra::get() as u64 +} + +/// Time that will be calculated for the reward per era. 
+pub(crate) fn reward_time_per_era() -> u64 { + time_per_era() - BLOCK_TIME } pub(crate) fn reward_all_elected() { - let rewards = ::SessionInterface::validators() + let rewards = ::SessionInterface::validators() .into_iter() .map(|v| (v, 1)); @@ -850,8 +841,8 @@ pub(crate) fn horrible_npos_solution( let score = { let (_, _, better_score) = prepare_submission_with(true, true, 0, |_| {}); - let support = build_support_map::(&winners, &staked_assignment).unwrap(); - let score = evaluate_support(&support); + let support = to_support_map::(&winners, &staked_assignment).unwrap(); + let score = support.evaluate(); assert!(sp_npos_elections::is_score_better::( better_score, @@ -950,11 +941,11 @@ pub(crate) fn prepare_submission_with( Staking::slashable_balance_of_fn(), ); - let support_map = build_support_map::( + let support_map = to_support_map::( winners.as_slice(), staked.as_slice(), ).unwrap(); - evaluate_support::(&support_map) + support_map.evaluate() } else { Default::default() }; @@ -978,8 +969,11 @@ pub(crate) fn make_all_reward_payment(era: EraIndex) { // reward validators for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { let ledger = >::get(&validator_controller).unwrap(); - - assert_ok!(Staking::payout_stakers(Origin::signed(1337), ledger.stash, era)); + assert_ok!(Staking::payout_stakers( + Origin::signed(1337), + ledger.stash, + era + )); } } @@ -1003,9 +997,9 @@ macro_rules! assert_session_era { }; } -pub(crate) fn staking_events() -> Vec> { +pub(crate) fn staking_events() -> Vec> { System::events().into_iter().map(|r| r.event).filter_map(|e| { - if let MetaEvent::staking(inner) = e { + if let Event::staking(inner) = e { Some(inner) } else { None diff --git a/frame/staking/src/offchain_election.rs b/frame/staking/src/offchain_election.rs index cb4d460f68035..4f80d75086e7e 100644 --- a/frame/staking/src/offchain_election.rs +++ b/frame/staking/src/offchain_election.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,17 +19,17 @@ use crate::{ Call, CompactAssignments, ElectionSize, Module, NominatorIndex, Nominators, OffchainAccuracy, - Trait, ValidatorIndex, WeightInfo, + Config, ValidatorIndex, WeightInfo, }; use codec::Decode; use frame_support::{traits::Get, weights::Weight, IterableStorageMap}; use frame_system::offchain::SubmitTransaction; use sp_npos_elections::{ - build_support_map, evaluate_support, reduce, Assignment, ElectionResult, ElectionScore, - ExtendedBalance, + to_support_map, EvaluateSupport, reduce, Assignment, ElectionResult, ElectionScore, + ExtendedBalance, CompactSolution, }; use sp_runtime::{ - offchain::storage::StorageValueRef, traits::TrailingZeroInput, PerThing, RuntimeDebug, + offchain::storage::StorageValueRef, traits::TrailingZeroInput, RuntimeDebug, }; use sp_std::{convert::TryInto, prelude::*}; @@ -71,7 +71,7 @@ pub(crate) const DEFAULT_LONGEVITY: u64 = 25; /// don't run twice within a window of length [`OFFCHAIN_REPEAT`]. /// /// Returns `Ok(())` if offchain worker should happen, `Err(reason)` otherwise. 
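The timing helpers defined above reduce to simple arithmetic over `Period`, `SessionsPerEra` and `BLOCK_TIME`. A standalone sketch using the mock's default values, including the one-block deduction that `reward_time_per_era` makes because the timestamp is set after `on_initialize`:

```rust
const BLOCK_TIME: u64 = 1_000;

fn time_per_session(period: u64) -> u64 {
    period * BLOCK_TIME
}

fn time_per_era(period: u64, sessions_per_era: u64) -> u64 {
    time_per_session(period) * sessions_per_era
}

fn reward_time_per_era(period: u64, sessions_per_era: u64) -> u64 {
    // One block shorter, since the timestamp lags on_initialize by one block.
    time_per_era(period, sessions_per_era) - BLOCK_TIME
}

fn main() {
    // Mock defaults: Period = 5, SessionsPerEra = 3.
    assert_eq!(time_per_session(5), 5_000);
    assert_eq!(time_per_era(5, 3), 15_000);
    assert_eq!(reward_time_per_era(5, 3), 14_000);
}
```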
-pub(crate) fn set_check_offchain_execution_status( +pub(crate) fn set_check_offchain_execution_status( now: T::BlockNumber, ) -> Result<(), &'static str> { let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); @@ -108,7 +108,7 @@ pub(crate) fn set_check_offchain_execution_status( /// The internal logic of the offchain worker of this module. This runs the phragmen election, /// compacts and reduces the solution, computes the score and submits it back to the chain as an /// unsigned transaction, without any signature. -pub(crate) fn compute_offchain_election() -> Result<(), OffchainElectionError> { +pub(crate) fn compute_offchain_election() -> Result<(), OffchainElectionError> { let iters = get_balancing_iters::(); // compute raw solution. Note that we use `OffchainAccuracy`. let ElectionResult { @@ -151,7 +151,7 @@ pub(crate) fn compute_offchain_election() -> Result<(), OffchainElecti /// Get a random number of iterations to run the balancing. /// /// Uses the offchain seed to generate a random number. -pub fn get_balancing_iters() -> usize { +pub fn get_balancing_iters() -> usize { match T::MaxIterations::get() { 0 => 0, max @ _ => { @@ -257,7 +257,7 @@ pub fn maximum_compact_len( /// /// Indeed, the score must be computed **after** this step. If this step reduces the score too much, /// then the solution will be discarded. -pub fn trim_to_weight( +pub fn trim_to_weight( maximum_allowed_voters: u32, mut compact: CompactAssignments, nominator_index: FN, @@ -265,7 +265,7 @@ pub fn trim_to_weight( where for<'r> FN: Fn(&'r T::AccountId) -> Option, { - match compact.len().checked_sub(maximum_allowed_voters as usize) { + match compact.voter_count().checked_sub(maximum_allowed_voters as usize) { Some(to_remove) if to_remove > 0 => { // grab all voters and sort them by least stake. let balance_of = >::slashable_balance_of_fn(); @@ -300,7 +300,7 @@ where warn, "💸 {} nominators out of {} had to be removed from compact solution due to size limits.", removed, - compact.len() + removed, + compact.voter_count() + removed, ); Ok(compact) } @@ -318,23 +318,15 @@ where /// Takes an election result and spits out some data that can be submitted to the chain. /// /// This does a lot of stuff; read the inline comments. -pub fn prepare_submission( +pub fn prepare_submission( assignments: Vec>, winners: Vec<(T::AccountId, ExtendedBalance)>, do_reduce: bool, maximum_weight: Weight, ) -> Result< - ( - Vec, - CompactAssignments, - ElectionScore, - ElectionSize, - ), + (Vec, CompactAssignments, ElectionScore, ElectionSize), OffchainElectionError, -> -where - ExtendedBalance: From<::Inner>, -{ +> { // make sure that the snapshot is available. let snapshot_validators = >::snapshot_validators().ok_or(OffchainElectionError::SnapshotUnavailable)?; @@ -403,11 +395,11 @@ where T::WeightInfo::submit_solution_better( size.validators.into(), size.nominators.into(), - compact.len() as u32, + compact.voter_count() as u32, winners.len() as u32, ), maximum_allowed_voters, - compact.len(), + compact.voter_count(), ); let compact = trim_to_weight::(maximum_allowed_voters, compact, &nominator_index)?; @@ -423,9 +415,9 @@ where >::slashable_balance_of_fn(), ); - let support_map = build_support_map::(&winners, &staked) + let support_map = to_support_map::(&winners, &staked) .map_err(|_| OffchainElectionError::ElectionFailed)?; - evaluate_support::(&support_map) + support_map.evaluate() }; // winners to index. Use a simple for loop for a more expressive early exit in case of error. 
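`maximum_compact_len` and `trim_to_weight` above implement "fit the solution into the weight budget by dropping the lowest-staked voters". A standalone sketch of that idea with made-up weights; a linear scan stands in for the real search, and `(stake, name)` tuples stand in for compact voter entries:

```rust
// Find how many voters fit under the weight ceiling (weight grows with the
// voter count), then drop the lowest-staked voters until the solution fits.
fn maximum_allowed_voters(weight_of: impl Fn(u32) -> u64, max_weight: u64, all: u32) -> u32 {
    // Linear scan for clarity; the pallet does this more cleverly.
    (0..=all).rev().find(|&v| weight_of(v) <= max_weight).unwrap_or(0)
}

fn trim_to_weight(
    mut voters: Vec<(u64, &'static str)>, // (stake, voter) stand-ins
    keep: usize,
) -> Vec<(u64, &'static str)> {
    voters.sort_by_key(|(stake, _)| *stake); // least stake first
    let to_remove = voters.len().saturating_sub(keep);
    voters.drain(..to_remove); // drop the cheapest-to-lose voters
    voters
}

fn main() {
    // Pretend each voter adds 1_000 weight on top of a 2_000 base.
    let weight_of = |v: u32| 2_000 + 1_000 * v as u64;
    let keep = maximum_allowed_voters(weight_of, 5_000, 10);
    assert_eq!(keep, 3);

    let trimmed = trim_to_weight(vec![(30, "c"), (10, "a"), (20, "b"), (40, "d")], keep as usize);
    assert_eq!(trimmed, vec![(20, "b"), (30, "c"), (40, "d")]);
}
```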
@@ -525,6 +517,9 @@ mod test { fn submit_solution_better(v: u32, n: u32, a: u32, w: u32) -> Weight { (0 * v + 0 * n + 1000 * a + 0 * w) as Weight } + fn kick(w: u32) -> Weight { + unimplemented!() + } } #[test] diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index af9a92f16a463..2b2ac61356c47 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -47,10 +47,10 @@ //! has multiple misbehaviors. However, accounting for such cases is necessary //! to deter a class of "rage-quit" attacks. //! -//! Based on research at https://research.web3.foundation/en/latest/polkadot/slashing/npos/ +//! Based on research at use super::{ - EraIndex, Trait, Module, Store, BalanceOf, Exposure, Perbill, SessionInterface, + EraIndex, Config, Module, Store, BalanceOf, Exposure, Perbill, SessionInterface, NegativeImbalanceOf, UnappliedSlash, Error, }; use sp_runtime::{traits::{Zero, Saturating}, RuntimeDebug, DispatchResult}; @@ -190,7 +190,7 @@ impl SpanRecord { /// Parameters for performing a slash. #[derive(Clone)] -pub(crate) struct SlashParams<'a, T: 'a + Trait> { +pub(crate) struct SlashParams<'a, T: 'a + Config> { /// The stash account being slashed. pub(crate) stash: &'a T::AccountId, /// The proportion of the slash. @@ -214,7 +214,7 @@ pub(crate) struct SlashParams<'a, T: 'a + Trait> { /// /// The pending slash record returned does not have initialized reporters. Those have /// to be set at a higher level, if any. -pub(crate) fn compute_slash(params: SlashParams) +pub(crate) fn compute_slash(params: SlashParams) -> Option>> { let SlashParams { @@ -309,7 +309,7 @@ pub(crate) fn compute_slash(params: SlashParams) // doesn't apply any slash, but kicks out the validator if the misbehavior is from // the most recent slashing span. -fn kick_out_if_recent( +fn kick_out_if_recent( params: SlashParams, ) { // these are not updated by era-span or end-span. @@ -338,7 +338,7 @@ fn kick_out_if_recent( /// Slash nominators. Accepts general parameters and the prior slash percentage of the validator. /// /// Returns the amount of reward to pay out. -fn slash_nominators( +fn slash_nominators( params: SlashParams, prior_slash_p: Perbill, nominators_slashed: &mut Vec<(T::AccountId, BalanceOf)>, @@ -418,7 +418,7 @@ fn slash_nominators( // dropping this struct applies any necessary slashes, which can lead to free balance // being 0, and the account being garbage-collected -- a dead account should get no new // metadata. -struct InspectingSpans<'a, T: Trait + 'a> { +struct InspectingSpans<'a, T: Config + 'a> { dirty: bool, window_start: EraIndex, stash: &'a T::AccountId, @@ -430,7 +430,7 @@ struct InspectingSpans<'a, T: Trait + 'a> { } // fetches the slashing spans record for a stash account, initializing it if necessary. 
-fn fetch_spans<'a, T: Trait + 'a>( +fn fetch_spans<'a, T: Config + 'a>( stash: &'a T::AccountId, window_start: EraIndex, paid_out: &'a mut BalanceOf, @@ -455,7 +455,7 @@ fn fetch_spans<'a, T: Trait + 'a>( } } -impl<'a, T: 'a + Trait> InspectingSpans<'a, T> { +impl<'a, T: 'a + Config> InspectingSpans<'a, T> { fn span_index(&self) -> SpanIndex { self.spans.span_index } @@ -526,7 +526,7 @@ impl<'a, T: 'a + Trait> InspectingSpans<'a, T> { } } -impl<'a, T: 'a + Trait> Drop for InspectingSpans<'a, T> { +impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> { fn drop(&mut self) { // only update on disk if we slashed this account. if !self.dirty { return } @@ -542,13 +542,13 @@ impl<'a, T: 'a + Trait> Drop for InspectingSpans<'a, T> { } /// Clear slashing metadata for an obsolete era. -pub(crate) fn clear_era_metadata(obsolete_era: EraIndex) { +pub(crate) fn clear_era_metadata(obsolete_era: EraIndex) { as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era); as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era); } /// Clear slashing metadata for a dead account. -pub(crate) fn clear_stash_metadata( +pub(crate) fn clear_stash_metadata( stash: &T::AccountId, num_slashing_spans: u32, ) -> DispatchResult { @@ -576,7 +576,7 @@ pub(crate) fn clear_stash_metadata( // apply the slash to a stash account, deducting any missing funds from the reward // payout, saturating at 0. this is mildly unfair but also an edge-case that // can only occur when overlapping locked funds have been slashed. -pub fn do_slash( +pub fn do_slash( stash: &T::AccountId, value: BalanceOf, reward_payout: &mut BalanceOf, @@ -613,7 +613,7 @@ pub fn do_slash( } /// Apply a previously-unapplied slash. -pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash>) { +pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash>) { let mut slashed_imbalance = NegativeImbalanceOf::::zero(); let mut reward_payout = unapplied_slash.payout; @@ -638,7 +638,7 @@ pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash( +fn pay_reporters( reward_payout: BalanceOf, slashed_imbalance: NegativeImbalanceOf, reporters: &[T::AccountId], diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 57ad95bcf586f..a30c0136550b4 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,8 +28,14 @@ use sp_npos_elections::*; const SEED: u32 = 0; +/// This function removes all validators and nominators from storage. +pub fn clear_validators_and_nominators() { + Validators::::remove_all(); + Nominators::::remove_all(); +} + /// Grab a funded user. -pub fn create_funded_user( +pub fn create_funded_user( string: &'static str, n: u32, balance_factor: u32, @@ -43,7 +49,7 @@ pub fn create_funded_user( } /// Create a stash and controller pair. -pub fn create_stash_controller( +pub fn create_stash_controller( n: u32, balance_factor: u32, destination: RewardDestination, @@ -60,7 +66,7 @@ pub fn create_stash_controller( /// Create a stash and controller pair, where the controller is dead, and payouts go to controller. /// This is used to test worst case payout scenarios. 
-pub fn create_stash_and_dead_controller( +pub fn create_stash_and_dead_controller( n: u32, balance_factor: u32, destination: RewardDestination, @@ -77,7 +83,7 @@ pub fn create_stash_and_dead_controller( } /// create `max` validators. -pub fn create_validators( +pub fn create_validators( max: u32, balance_factor: u32, ) -> Result::Source>, &'static str> { @@ -86,6 +92,7 @@ pub fn create_validators( let (stash, controller) = create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), + .. Default::default() }; Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; let stash_lookup: ::Source = T::Lookup::unlookup(stash); @@ -97,6 +104,9 @@ pub fn create_validators( /// This function generates validators and nominators who are randomly nominating /// `edge_per_nominator` random validators (until `to_nominate` if provided). /// +/// NOTE: This function will remove any existing validators or nominators to ensure +/// we are working with a clean state. +/// /// Parameters: /// - `validators`: number of bonded validators /// - `nominators`: number of bonded nominators. @@ -106,13 +116,15 @@ pub fn create_validators( /// Else, all of them are considered and `edge_per_nominator` random validators are voted for. /// /// Return the validators choosen to be nominated. -pub fn create_validators_with_nominators_for_era( +pub fn create_validators_with_nominators_for_era( validators: u32, nominators: u32, edge_per_nominator: usize, randomize_stake: bool, to_nominate: Option, ) -> Result::Source>, &'static str> { + clear_validators_and_nominators::(); + let mut validators_stash: Vec<::Source> = Vec::with_capacity(validators as usize); let mut rng = ChaChaRng::from_seed(SEED.using_encoded(blake2_256)); @@ -123,6 +135,7 @@ pub fn create_validators_with_nominators_for_era( let (v_stash, v_controller) = create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), + .. Default::default() }; Staking::::validate(RawOrigin::Signed(v_controller.clone()).into(), validator_prefs)?; let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); @@ -162,7 +175,7 @@ pub fn create_validators_with_nominators_for_era( /// Build a _really bad_ but acceptable solution for election. This should always yield a solution /// which has a less score than the seq-phragmen. -pub fn get_weak_solution( +pub fn get_weak_solution( do_reduce: bool, ) -> (Vec, CompactAssignments, ElectionScore, ElectionSize) { let mut backing_stake_of: BTreeMap> = BTreeMap::new(); @@ -233,11 +246,9 @@ pub fn get_weak_solution( >::slashable_balance_of_fn(), ); - let support_map = build_support_map::( - winners.as_slice(), - staked.as_slice(), - ).unwrap(); - evaluate_support::(&support_map) + let support_map = + to_support_map::(winners.as_slice(), staked.as_slice()).unwrap(); + support_map.evaluate() }; // compact encode the assignment. @@ -271,7 +282,7 @@ pub fn get_weak_solution( /// Create a solution for seq-phragmen. This uses the same internal function as used by the offchain /// worker code. -pub fn get_seq_phragmen_solution( +pub fn get_seq_phragmen_solution( do_reduce: bool, ) -> ( Vec, @@ -290,13 +301,13 @@ pub fn get_seq_phragmen_solution( assignments, winners, do_reduce, - T::MaximumBlockWeight::get(), + T::BlockWeights::get().max_block, ) .unwrap() } /// Returns a solution in which only one winner is elected with just a self vote. 
-pub fn get_single_winner_solution( +pub fn get_single_winner_solution( winner: T::AccountId, ) -> Result< ( @@ -341,7 +352,7 @@ pub fn get_single_winner_solution( } /// get the active era. -pub fn current_era() -> EraIndex { +pub fn current_era() -> EraIndex { >::current_era().unwrap_or(0) } @@ -355,7 +366,7 @@ pub fn init_active_era() { /// Create random assignments for the given list of winners. Each assignment will have /// MAX_NOMINATIONS edges. -pub fn create_assignments_for_offchain( +pub fn create_assignments_for_offchain( num_assignments: u32, winners: Vec<::Source>, ) -> Result< diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 2a02d87aa2c57..1f5e2a48888a5 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,7 +45,7 @@ fn force_unstake_works() { // Force unstake requires root. assert_noop!(Staking::force_unstake(Origin::signed(11), 11, 2), BadOrigin); // Force unstake needs correct number of slashing spans (for weight calculation) - assert_noop!(Staking::force_unstake(Origin::signed(11), 11, 0), BadOrigin); + assert_noop!(Staking::force_unstake(Origin::root(), 11, 0), Error::::IncorrectSlashingSpans); // We now force them to unstake assert_ok!(Staking::force_unstake(Origin::root(), 11, 2)); // No longer bonded. @@ -158,7 +158,7 @@ fn change_controller_works() { // change controller assert_ok!(Staking::set_controller(Origin::signed(11), 5)); assert_eq!(Staking::bonded(&11), Some(5)); - mock::start_era(1); + mock::start_active_era(1); // 10 is no longer in control. assert_noop!( @@ -171,12 +171,7 @@ fn change_controller_works() { #[test] fn rewards_should_work() { - // should check that: - // * rewards get recorded per session - // * rewards get paid per Era - // * `RewardRemainder::on_unbalanced` is called - // * Check that nominators are also rewarded - ExtBuilder::default().nominate(true).build_and_execute(|| { + ExtBuilder::default().nominate(true).session_per_era(3).build_and_execute(|| { let init_balance_10 = Balances::total_balance(&10); let init_balance_11 = Balances::total_balance(&11); let init_balance_20 = Balances::total_balance(&20); @@ -184,7 +179,7 @@ fn rewards_should_work() { let init_balance_100 = Balances::total_balance(&100); let init_balance_101 = Balances::total_balance(&101); - // Check state + // Set payees Payee::::insert(11, RewardDestination::Controller); Payee::::insert(21, RewardDestination::Controller); Payee::::insert(101, RewardDestination::Controller); @@ -194,9 +189,9 @@ fn rewards_should_work() { // This is the second validator of the current elected set. >::reward_by_ids(vec![(21, 50)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 10); // Test is meaningful if reward something + // Compute total payout now for whole duration of the session. 
+ let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); + let maximum_payout = maximum_payout_for_duration(reward_time_per_era()); start_session(1); @@ -207,10 +202,13 @@ fn rewards_should_work() { assert_eq!(Balances::total_balance(&100), init_balance_100); assert_eq!(Balances::total_balance(&101), init_balance_101); assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { - total: 50*3, - individual: vec![(11, 100), (21, 50)].into_iter().collect(), - }); + assert_eq!( + Staking::eras_reward_points(Staking::active_era().unwrap().index), + EraRewardPoints { + total: 50 * 3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + } + ); let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); @@ -220,14 +218,28 @@ fn rewards_should_work() { start_session(3); assert_eq!(Staking::active_era().unwrap().index, 1); - assert_eq!(mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), 7050); - assert_eq!(*mock::staking_events().last().unwrap(), RawEvent::EraPayout(0, 2350, 7050)); + assert_eq!( + mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), + maximum_payout - total_payout_0, + ); + assert_eq!( + *mock::staking_events().last().unwrap(), + RawEvent::EraPayout(0, total_payout_0, maximum_payout - total_payout_0) + ); mock::make_all_reward_payment(0); - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * total_payout_0*2/3, 2); - assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0*1/3, 2); - assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * total_payout_0 * 2 / 3, + 2, + ); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2,); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2, + ); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2,); assert_eq_error_rate!( Balances::total_balance(&100), init_balance_100 @@ -241,18 +253,31 @@ fn rewards_should_work() { >::reward_by_ids(vec![(11, 1)]); // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_1 > 10); // Test is meaningful if reward something + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); - mock::start_era(2); - assert_eq!(mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), 7050*2); - assert_eq!(*mock::staking_events().last().unwrap(), RawEvent::EraPayout(1, 2350, 7050)); + mock::start_active_era(2); + assert_eq!( + mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), + maximum_payout * 2 - total_payout_0 - total_payout_1, + ); + assert_eq!( + *mock::staking_events().last().unwrap(), + RawEvent::EraPayout(1, total_payout_1, maximum_payout - total_payout_1) + ); mock::make_all_reward_payment(1); - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * (total_payout_0 * 2/3 + total_payout_1), 2); - assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - 
assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0 * 1/3, 2); - assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * (total_payout_0 * 2 / 3 + total_payout_1), + 2, + ); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2,); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2, + ); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2,); assert_eq_error_rate!( Balances::total_balance(&100), init_balance_100 @@ -266,18 +291,11 @@ fn rewards_should_work() { #[test] fn staking_should_work() { - // should test: - // * new validators can be added to the default set - // * new ones will be chosen per era - // * either one can unlock the stash and back-down from being a validator via `chill`ing. ExtBuilder::default() .nominate(false) .fair(false) // to give 20 more staked value .build() .execute_with(|| { - // --- Block 1: - start_session(1); - // remember + compare this along with the test. assert_eq_uvec!(validator_controllers(), vec![20, 10]); @@ -346,6 +364,30 @@ fn staking_should_work() { }); } +#[test] +fn blocking_and_kicking_works() { + ExtBuilder::default() + .minimum_validator_count(1) + .validator_count(4) + .nominate(true) + .num_validators(3) + .build() + .execute_with(|| { + // block validator 10/11 + assert_ok!(Staking::validate(Origin::signed(10), ValidatorPrefs { blocked: true, .. Default::default() })); + // attempt to nominate from 100/101... + assert_ok!(Staking::nominate(Origin::signed(100), vec![11])); + // should have worked since we're already nominated them + assert_eq!(Nominators::::get(&101).unwrap().targets, vec![11]); + // kick the nominator + assert_ok!(Staking::kick(Origin::signed(10), vec![101])); + // should have been kicked now + assert!(Nominators::::get(&101).unwrap().targets.is_empty()); + // attempt to nominate from 100/101... + assert_noop!(Staking::nominate(Origin::signed(100), vec![11]), Error::::BadTarget); + }); +} + #[test] fn less_than_needed_candidates_works() { ExtBuilder::default() @@ -359,7 +401,7 @@ fn less_than_needed_candidates_works() { assert_eq!(Staking::minimum_validator_count(), 1); assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); - mock::start_era(1); + mock::start_active_era(1); // Previous set is selected. NO election algorithm is even executed. assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); @@ -385,7 +427,7 @@ fn no_candidate_emergency_condition() { .execute_with(|| { // initial validators assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); - let prefs = ValidatorPrefs { commission: Perbill::one() }; + let prefs = ValidatorPrefs { commission: Perbill::one(), .. Default::default() }; ::Validators::insert(11, prefs.clone()); // set the minimum validator count. @@ -395,7 +437,7 @@ fn no_candidate_emergency_condition() { let _ = Staking::chill(Origin::signed(10)); // trigger era - mock::start_era(1); + mock::start_active_era(1); // Previous ones are elected. chill is invalidates. 
TODO: #2494 assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); @@ -435,12 +477,11 @@ fn nominating_and_rewards_should_work() { assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 21, 41])); // the total reward for era 0 - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something + let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); >::reward_by_ids(vec![(41, 1)]); >::reward_by_ids(vec![(31, 1)]); - mock::start_era(1); + mock::start_active_era(1); // 10 and 20 have more votes, they will be chosen. assert_eq_uvec!(validator_controllers(), vec![20, 10]); @@ -478,12 +519,11 @@ fn nominating_and_rewards_should_work() { ); // the total reward for era 1 - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 100); // Test is meaningful if reward something + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); >::reward_by_ids(vec![(21, 2)]); >::reward_by_ids(vec![(11, 1)]); - mock::start_era(2); + mock::start_active_era(2); // nothing else will happen, era ends and rewards are paid again, // it is expected that nominators will also be paid. See below @@ -495,26 +535,26 @@ fn nominating_and_rewards_should_work() { assert_eq_error_rate!( Balances::total_balance(&2), initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), - 1, + 2, ); // Nominator 4: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 assert_eq_error_rate!( Balances::total_balance(&4), initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), - 1, + 2, ); // Validator 10: got 800 / 1800 external stake => 8/18 =? 4/9 => Validator's share = 5/9 assert_eq_error_rate!( Balances::total_balance(&10), initial_balance + 5 * payout_for_10 / 9, - 1, + 2, ); // Validator 20: got 1200 / 2200 external stake => 12/22 =? 6/11 => Validator's share = 5/11 assert_eq_error_rate!( Balances::total_balance(&20), initial_balance + 5 * payout_for_20 / 11, - 1, + 2, ); }); } @@ -522,7 +562,7 @@ fn nominating_and_rewards_should_work() { #[test] fn nominators_also_get_slashed_pro_rata() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); let slash_percent = Perbill::from_percent(5); let initial_exposure = Staking::eras_stakers(active_era(), 11); // 101 is a nominator for 11 @@ -637,40 +677,87 @@ fn double_controlling_should_fail() { } #[test] -fn session_and_eras_work() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Staking::active_era().unwrap().index, 0); - assert_eq!(Session::current_index(), 0); +fn session_and_eras_work_simple() { + ExtBuilder::default().period(1).build_and_execute(|| { + assert_eq!(active_era(), 0); + assert_eq!(current_era(), 0); + assert_eq!(Session::current_index(), 1); + assert_eq!(System::block_number(), 1); - // Session 1: No change. + // Session 1: this is basically a noop. This has already been started. start_session(1); assert_eq!(Session::current_index(), 1); - assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(active_era(), 0); + assert_eq!(System::block_number(), 1); // Session 2: No change. start_session(2); assert_eq!(Session::current_index(), 2); - assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(active_era(), 0); + assert_eq!(System::block_number(), 2); // Session 3: Era increment. 
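// (In this mock SessionsPerEra is 3 and, with `period(1)`, every session lasts exactly one
// block, so `System::block_number()` tracks the session index and the active era bumps at
// sessions 3 and 6 below; the `_complex` variant runs the same schedule with a five-block
// period, so block numbers become `5 * session_index`.)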
start_session(3); assert_eq!(Session::current_index(), 3); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 3); // Session 4: No change. start_session(4); assert_eq!(Session::current_index(), 4); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 4); // Session 5: No change. start_session(5); assert_eq!(Session::current_index(), 5); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 5); // Session 6: Era increment. start_session(6); assert_eq!(Session::current_index(), 6); - assert_eq!(Staking::active_era().unwrap().index, 2); + assert_eq!(active_era(), 2); + assert_eq!(System::block_number(), 6); + }); +} + +#[test] +fn session_and_eras_work_complex() { + ExtBuilder::default().period(5).build_and_execute(|| { + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 0); + assert_eq!(System::block_number(), 1); + + start_session(1); + assert_eq!(Session::current_index(), 1); + assert_eq!(active_era(), 0); + assert_eq!(System::block_number(), 5); + + start_session(2); + assert_eq!(Session::current_index(), 2); + assert_eq!(active_era(), 0); + assert_eq!(System::block_number(), 10); + + start_session(3); + assert_eq!(Session::current_index(), 3); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 15); + + start_session(4); + assert_eq!(Session::current_index(), 4); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 20); + + start_session(5); + assert_eq!(Session::current_index(), 5); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 25); + + start_session(6); + assert_eq!(Session::current_index(), 6); + assert_eq!(active_era(), 2); + assert_eq!(System::block_number(), 30); }); } @@ -678,53 +765,62 @@ fn session_and_eras_work() { fn forcing_new_era_works() { ExtBuilder::default().build_and_execute(|| { // normal flow of session. - assert_eq!(Staking::active_era().unwrap().index, 0); - start_session(0); - assert_eq!(Staking::active_era().unwrap().index, 0); start_session(1); - assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(active_era(), 0); + start_session(2); - assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(active_era(), 0); + start_session(3); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); // no era change. ForceEra::put(Forcing::ForceNone); + start_session(4); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + start_session(5); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + start_session(6); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + start_session(7); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); // back to normal. // this immediately starts a new session. 
ForceEra::put(Forcing::NotForcing); + start_session(8); - assert_eq!(Staking::active_era().unwrap().index, 1); // There is one session delay - start_session(9); - assert_eq!(Staking::active_era().unwrap().index, 2); + assert_eq!(active_era(), 1); + start_session(9); + assert_eq!(active_era(), 2); // forceful change ForceEra::put(Forcing::ForceAlways); + start_session(10); - assert_eq!(Staking::active_era().unwrap().index, 2); // There is one session delay + assert_eq!(active_era(), 2); + start_session(11); - assert_eq!(Staking::active_era().unwrap().index, 3); + assert_eq!(active_era(), 3); + start_session(12); - assert_eq!(Staking::active_era().unwrap().index, 4); + assert_eq!(active_era(), 4); // just one forceful change ForceEra::put(Forcing::ForceNew); start_session(13); - assert_eq!(Staking::active_era().unwrap().index, 5); + assert_eq!(active_era(), 5); assert_eq!(ForceEra::get(), Forcing::NotForcing); + start_session(14); - assert_eq!(Staking::active_era().unwrap().index, 6); + assert_eq!(active_era(), 6); + start_session(15); - assert_eq!(Staking::active_era().unwrap().index, 6); + assert_eq!(active_era(), 6); }); } @@ -738,7 +834,7 @@ fn cannot_transfer_staked_balance() { // Confirm account 11 has some free balance assert_eq!(Balances::free_balance(11), 1000); // Confirm account 11 (via controller 10) is totally staked - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); // Confirm account 11 cannot transfer as a result assert_noop!( Balances::transfer(Origin::signed(11), 20, 1), @@ -816,11 +912,10 @@ fn reward_destination_works() { })); // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something + let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); >::reward_by_ids(vec![(11, 1)]); - mock::start_era(1); + mock::start_active_era(1); mock::make_all_reward_payment(0); // Check that RewardDestination is Staked (default) @@ -840,11 +935,10 @@ fn reward_destination_works() { >::insert(&11, RewardDestination::Stash); // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 100); // Test is meaningful if reward something + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); >::reward_by_ids(vec![(11, 1)]); - mock::start_era(2); + mock::start_active_era(2); mock::make_all_reward_payment(1); // Check that RewardDestination is Stash @@ -869,11 +963,10 @@ fn reward_destination_works() { assert_eq!(Balances::free_balance(10), 1); // Compute total payout now for whole duration as other parameter won't change - let total_payout_2 = current_total_payout_for_duration(3000); - assert!(total_payout_2 > 100); // Test is meaningful if reward something + let total_payout_2 = current_total_payout_for_duration(reward_time_per_era()); >::reward_by_ids(vec![(11, 1)]); - mock::start_era(3); + mock::start_active_era(3); mock::make_all_reward_payment(2); // Check that RewardDestination is Controller @@ -902,25 +995,25 @@ fn validator_payment_prefs_work() { let commission = Perbill::from_percent(40); >::insert(&11, ValidatorPrefs { commission: commission.clone(), + .. Default::default() }); // Reward controller so staked ratio doesn't change. 
>::insert(&11, RewardDestination::Controller); >::insert(&101, RewardDestination::Controller); - mock::start_era(1); + mock::start_active_era(1); mock::make_all_reward_payment(0); let balance_era_1_10 = Balances::total_balance(&10); let balance_era_1_100 = Balances::total_balance(&100); // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 100); // Test is meaningful if reward something + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); let exposure_1 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); >::reward_by_ids(vec![(11, 1)]); - mock::start_era(2); + mock::start_active_era(2); mock::make_all_reward_payment(1); let taken_cut = commission * total_payout_1; @@ -995,13 +1088,12 @@ fn bond_extra_and_withdraw_unbonded_works() { // Initial config should be correct assert_eq!(Staking::active_era().unwrap().index, 0); - assert_eq!(Session::current_index(), 0); // check the balance of a validator accounts. assert_eq!(Balances::total_balance(&10), 1); // confirm that 10 is a normal validator and gets paid at the end of the era. - mock::start_era(1); + mock::start_active_era(1); // Initial state of 10 assert_eq!(Staking::ledger(&10), Some(StakingLedger { @@ -1033,7 +1125,7 @@ fn bond_extra_and_withdraw_unbonded_works() { ); // trigger next era. - mock::start_era(2); + mock::start_active_era(2); assert_eq!(Staking::active_era().unwrap().index, 2); // ledger should be the same. @@ -1077,7 +1169,7 @@ fn bond_extra_and_withdraw_unbonded_works() { ); // trigger next era. - mock::start_era(3); + mock::start_active_era(3); // nothing yet assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); @@ -1093,7 +1185,7 @@ fn bond_extra_and_withdraw_unbonded_works() { ); // trigger next era. - mock::start_era(5); + mock::start_active_era(5); assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); // Now the value is free and the staking ledger is updated. @@ -1118,14 +1210,14 @@ fn too_many_unbond_calls_should_not_work() { assert_ok!(Staking::unbond(Origin::signed(10), 1)); } - mock::start_era(1); + mock::start_active_era(1); // locked at era 1 until 4 assert_ok!(Staking::unbond(Origin::signed(10), 1)); // can't do more. assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); - mock::start_era(3); + mock::start_active_era(3); assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); // free up. @@ -1157,7 +1249,7 @@ fn rebond_works() { let _ = Balances::make_free_balance_be(&11, 1000000); // confirm that 10 is a normal validator and gets paid at the end of the era. - mock::start_era(1); + mock::start_active_era(1); // Initial state of 10 assert_eq!( @@ -1171,7 +1263,7 @@ fn rebond_works() { }) ); - mock::start_era(2); + mock::start_active_era(2); assert_eq!(Staking::active_era().unwrap().index, 2); // Try to rebond some funds. We get an error since no fund is unbonded. @@ -1302,7 +1394,7 @@ fn rebond_is_fifo() { let _ = Balances::make_free_balance_be(&11, 1000000); // confirm that 10 is a normal validator and gets paid at the end of the era. - mock::start_era(1); + mock::start_active_era(1); // Initial state of 10 assert_eq!( @@ -1316,7 +1408,7 @@ fn rebond_is_fifo() { }) ); - mock::start_era(2); + mock::start_active_era(2); // Unbond some of the funds in stash. 
Staking::unbond(Origin::signed(10), 400).unwrap(); @@ -1333,7 +1425,7 @@ fn rebond_is_fifo() { }) ); - mock::start_era(3); + mock::start_active_era(3); // Unbond more of the funds in stash. Staking::unbond(Origin::signed(10), 300).unwrap(); @@ -1351,7 +1443,7 @@ fn rebond_is_fifo() { }) ); - mock::start_era(4); + mock::start_active_era(4); // Unbond yet more of the funds in stash. Staking::unbond(Origin::signed(10), 200).unwrap(); @@ -1411,13 +1503,12 @@ fn reward_to_stake_works() { >::insert(&20, StakingLedger { stash: 21, total: 69, active: 69, unlocking: vec![], claimed_rewards: vec![] }); // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something + let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); >::reward_by_ids(vec![(11, 1)]); >::reward_by_ids(vec![(21, 1)]); // New era --> rewards are paid --> stakes are changed - mock::start_era(1); + mock::start_active_era(1); mock::make_all_reward_payment(0); assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); @@ -1427,7 +1518,7 @@ fn reward_to_stake_works() { assert_eq!(_11_balance, 1000 + total_payout_0 / 2); // Trigger another new era as the info are frozen before the era start. - mock::start_era(2); + mock::start_active_era(2); // -- new infos assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000 + total_payout_0 / 2); @@ -1474,7 +1565,7 @@ fn on_free_balance_zero_stash_removes_validator() { // Reduce free_balance of stash to 0 let _ = Balances::slash(&11, Balance::max_value()); // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 0); + assert_eq!(Balances::total_balance(&11), 10); // Reap the stash assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); @@ -1530,7 +1621,7 @@ fn on_free_balance_zero_stash_removes_nominator() { // Reduce free_balance of stash to 0 let _ = Balances::slash(&11, Balance::max_value()); // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 0); + assert_eq!(Balances::total_balance(&11), 10); // Reap the stash assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); @@ -1568,7 +1659,7 @@ fn switching_roles() { assert_ok!(Staking::bond(Origin::signed(5), 6, 1000, RewardDestination::Controller)); assert_ok!(Staking::validate(Origin::signed(6), ValidatorPrefs::default())); - mock::start_era(1); + mock::start_active_era(1); // with current nominators 10 and 5 have the most stake assert_eq_uvec!(validator_controllers(), vec![6, 10]); @@ -1582,7 +1673,7 @@ fn switching_roles() { // 2 : 2000 self vote + 250 vote. // Winners: 20 and 2 - mock::start_era(2); + mock::start_active_era(2); assert_eq_uvec!(validator_controllers(), vec![2, 20]); }); @@ -1604,7 +1695,7 @@ fn wrong_vote_is_null() { ])); // new block - mock::start_era(1); + mock::start_active_era(1); assert_eq_uvec!(validator_controllers(), vec![20, 10]); }); @@ -1643,15 +1734,15 @@ fn bond_with_no_staked_value() { }) ); - mock::start_era(1); - mock::start_era(2); + mock::start_active_era(1); + mock::start_active_era(2); // not yet removed. assert_ok!(Staking::withdraw_unbonded(Origin::signed(2), 0)); assert!(Staking::ledger(2).is_some()); assert_eq!(Balances::locks(&1)[0].amount, 5); - mock::start_era(3); + mock::start_active_era(3); // poof. Account 1 is removed from the staking system. 
assert_ok!(Staking::withdraw_unbonded(Origin::signed(2), 0)); @@ -1662,8 +1753,6 @@ fn bond_with_no_staked_value() { #[test] fn bond_with_little_staked_value_bounded() { - // Behavior when someone bonds with little staked value. - // Particularly when she votes and the candidate is elected. ExtBuilder::default() .validator_count(3) .nominate(false) @@ -1680,37 +1769,38 @@ fn bond_with_little_staked_value_bounded() { assert_ok!(Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller)); assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default())); - // reward era 0 - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something + // 1 era worth of reward. BUT, we set the timestamp after on_initialize, so outdated by + // one block. + let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); + reward_all_elected(); - mock::start_era(1); + mock::start_active_era(1); mock::make_all_reward_payment(0); // 2 is elected. assert_eq_uvec!(validator_controllers(), vec![20, 10, 2]); - // And has minimal stake - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 2).total, 0); + assert_eq!(Staking::eras_stakers(active_era(), 2).total, 0); // Old ones are rewarded. - assert_eq!(Balances::free_balance(10), init_balance_10 + total_payout_0 / 3); + assert_eq_error_rate!(Balances::free_balance(10), init_balance_10 + total_payout_0 / 3, 1); // no rewards paid to 2. This was initial election. assert_eq!(Balances::free_balance(2), init_balance_2); - // reward era 1 - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 100); // Test is meaningful if reward something + // reward era 2 + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); reward_all_elected(); - mock::start_era(2); + mock::start_active_era(2); mock::make_all_reward_payment(1); assert_eq_uvec!(validator_controllers(), vec![20, 10, 2]); - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 2).total, 0); + assert_eq!(Staking::eras_stakers(active_era(), 2).total, 0); - assert_eq!(Balances::free_balance(2), init_balance_2 + total_payout_1 / 3); - assert_eq!( + // 2 is now rewarded. 
+ assert_eq_error_rate!(Balances::free_balance(2), init_balance_2 + total_payout_1 / 3, 1); + assert_eq_error_rate!( Balances::free_balance(&10), init_balance_10 + total_payout_0 / 3 + total_payout_1 / 3, + 2, ); }); } @@ -1842,7 +1932,7 @@ fn phragmen_should_not_overflow() { bond_nominator(7, 6, Votes::max_value() as Balance, vec![3, 5]); bond_nominator(9, 8, Votes::max_value() as Balance, vec![3, 5]); - mock::start_era(1); + mock::start_active_era(1); assert_eq_uvec!(validator_controllers(), vec![4, 2]); @@ -1983,10 +2073,10 @@ fn era_is_always_same_length() { ExtBuilder::default().build_and_execute(|| { let session_per_era = >::get(); - mock::start_era(1); + mock::start_active_era(1); assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session_per_era); - mock::start_era(2); + mock::start_active_era(2); assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session_per_era * 2u32); let session = Session::current_index(); @@ -1996,7 +2086,7 @@ fn era_is_always_same_length() { assert_eq!(current_era(), 3); assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session + 2); - mock::start_era(4); + mock::start_active_era(4); assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session + 2u32 + session_per_era); }); } @@ -2060,7 +2150,7 @@ fn offence_deselects_validator_even_when_slash_is_zero() { assert_eq!(Staking::force_era(), Forcing::ForceNew); assert!(!>::contains_key(11)); - mock::start_era(1); + mock::start_active_era(1); assert!(!Session::validators().contains(&11)); assert!(!>::contains_key(11)); @@ -2098,7 +2188,7 @@ fn slashing_performed_according_exposure() { #[test] fn slash_in_old_span_does_not_deselect() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); assert!(>::contains_key(11)); assert!(Session::validators().contains(&11)); @@ -2117,14 +2207,14 @@ fn slash_in_old_span_does_not_deselect() { assert_eq!(Staking::force_era(), Forcing::ForceNew); assert!(!>::contains_key(11)); - mock::start_era(2); + mock::start_active_era(2); Staking::validate(Origin::signed(10), Default::default()).unwrap(); assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(>::contains_key(11)); assert!(!Session::validators().contains(&11)); - mock::start_era(3); + mock::start_active_era(3); // this staker is in a new slashing span now, having re-registered after // their prior slash. @@ -2389,8 +2479,8 @@ fn garbage_collection_after_slashing() { // validator and nominator slash in era are garbage-collected by era change, // so we don't test those here. - assert_eq!(Balances::free_balance(11), 0); - assert_eq!(Balances::total_balance(&11), 0); + assert_eq!(Balances::free_balance(11), 2); + assert_eq!(Balances::total_balance(&11), 2); let slashing_spans = ::SlashingSpans::get(&11).unwrap(); assert_eq!(slashing_spans.iter().count(), 2); @@ -2409,7 +2499,7 @@ fn garbage_collection_on_window_pruning() { // ensures that `ValidatorSlashInEra` and `NominatorSlashInEra` are cleared after // `BondingDuration`. 
ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); assert_eq!(Balances::free_balance(11), 1000); let now = Staking::active_era().unwrap().index; @@ -2439,7 +2529,7 @@ fn garbage_collection_on_window_pruning() { assert!(::ValidatorSlashInEra::get(&now, &11).is_some()); assert!(::NominatorSlashInEra::get(&now, &101).is_some()); - mock::start_era(era); + mock::start_active_era(era); } assert!(::ValidatorSlashInEra::get(&now, &11).is_none()); @@ -2450,9 +2540,9 @@ fn garbage_collection_on_window_pruning() { #[test] fn slashing_nominators_by_span_max() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); - mock::start_era(2); - mock::start_era(3); + mock::start_active_era(1); + mock::start_active_era(2); + mock::start_active_era(3); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(21), 2000); @@ -2548,9 +2638,9 @@ fn slashing_nominators_by_span_max() { #[test] fn slashes_are_summed_across_spans() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); - mock::start_era(2); - mock::start_era(3); + mock::start_active_era(1); + mock::start_active_era(2); + mock::start_active_era(3); assert_eq!(Balances::free_balance(21), 2000); assert_eq!(Staking::slashable_balance_of(&21), 1000); @@ -2578,7 +2668,7 @@ fn slashes_are_summed_across_spans() { // 21 has been force-chilled. re-signal intent to validate. Staking::validate(Origin::signed(20), Default::default()).unwrap(); - mock::start_era(4); + mock::start_active_era(4); assert_eq!(Staking::slashable_balance_of(&21), 900); @@ -2605,16 +2695,18 @@ fn slashes_are_summed_across_spans() { #[test] fn deferred_slashes_are_deferred() { - ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { - mock::start_era(1); + ExtBuilder::default() + .slash_defer_duration(2) + .build_and_execute(|| { + mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - assert_eq!(Balances::free_balance(101), 2000); - let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + assert_eq!(Balances::free_balance(101), 2000); + let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; - on_offence_now( + on_offence_now( &[ OffenceDetails { offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), @@ -2624,40 +2716,42 @@ fn deferred_slashes_are_deferred() { &[Perbill::from_percent(10)], ); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(2); + mock::start_active_era(2); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(3); + mock::start_active_era(3); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - // at the start of era 4, slashes from era 1 are processed, - // after being deferred for at least 2 full eras. 
- mock::start_era(4); + // at the start of era 4, slashes from era 1 are processed, + // after being deferred for at least 2 full eras. + mock::start_active_era(4); - assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); - }) + assert_eq!(Balances::free_balance(11), 900); + assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); + }) } #[test] fn remove_deferred() { - ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { - mock::start_era(1); + ExtBuilder::default() + .slash_defer_duration(2) + .build_and_execute(|| { + mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - assert_eq!(Balances::free_balance(101), 2000); - let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + assert_eq!(Balances::free_balance(101), 2000); + let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; - on_offence_now( + on_offence_now( &[ OffenceDetails { offender: (11, exposure.clone()), @@ -2667,12 +2761,12 @@ fn remove_deferred() { &[Perbill::from_percent(10)], ); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(2); + mock::start_active_era(2); - on_offence_in_era( + on_offence_in_era( &[ OffenceDetails { offender: (11, exposure.clone()), @@ -2689,32 +2783,32 @@ fn remove_deferred() { Error::::EmptyTargets ); - assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 1, vec![0])); + assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 1, vec![0])); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(3); + mock::start_active_era(3); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - // at the start of era 4, slashes from era 1 are processed, - // after being deferred for at least 2 full eras. - mock::start_era(4); + // at the start of era 4, slashes from era 1 are processed, + // after being deferred for at least 2 full eras. + mock::start_active_era(4); - // the first slash for 10% was cancelled, so no effect. - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + // the first slash for 10% was cancelled, so no effect. + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(5); + mock::start_active_era(5); - let slash_10 = Perbill::from_percent(10); - let slash_15 = Perbill::from_percent(15); - let initial_slash = slash_10 * nominated_value; + let slash_10 = Perbill::from_percent(10); + let slash_15 = Perbill::from_percent(15); + let initial_slash = slash_10 * nominated_value; - let total_slash = slash_15 * nominated_value; - let actual_slash = total_slash - initial_slash; + let total_slash = slash_15 * nominated_value; + let actual_slash = total_slash - initial_slash; // 5% slash (15 - 10) processed now. 
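// Concretely, with the mock's genesis numbers: validator 11's own 1_000 loses
// Perbill::from_percent(5) * 1_000 = 50, hence the 950 checked below, while
// nominator 101 loses `total_slash - initial_slash`, i.e. 5% of `nominated_value`.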
assert_eq!(Balances::free_balance(11), 950); @@ -2724,15 +2818,17 @@ fn remove_deferred() { #[test] fn remove_multi_deferred() { - ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { - mock::start_era(1); + ExtBuilder::default() + .slash_defer_duration(2) + .build_and_execute(|| { + mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - assert_eq!(Balances::free_balance(101), 2000); + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + assert_eq!(Balances::free_balance(101), 2000); - on_offence_now( + on_offence_now( &[ OffenceDetails { offender: (11, exposure.clone()), @@ -2899,7 +2995,7 @@ mod offchain_election { .session_per_era(3) .build() .execute_with(|| { - mock::start_era(1); + mock::start_active_era(1); assert_eq!(Session::current_index(), 3); assert_eq!(Staking::current_era(), Some(1)); assert_eq!(Staking::is_current_session_final(), false); @@ -2921,7 +3017,7 @@ mod offchain_election { fn offchain_window_is_triggered() { ExtBuilder::default() .session_per_era(5) - .session_length(10) + .period(10) .election_lookahead(3) .build() .execute_with(|| { @@ -2981,7 +3077,7 @@ mod offchain_election { fn offchain_window_is_triggered_when_forcing() { ExtBuilder::default() .session_per_era(5) - .session_length(10) + .period(10) .election_lookahead(3) .build() .execute_with(|| { @@ -3002,11 +3098,10 @@ mod offchain_election { fn offchain_window_is_triggered_when_force_always() { ExtBuilder::default() .session_per_era(5) - .session_length(10) + .period(10) .election_lookahead(3) .build() .execute_with(|| { - ForceEra::put(Forcing::ForceAlways); run_to_block(16); assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); @@ -3029,7 +3124,7 @@ mod offchain_election { fn offchain_window_closes_when_forcenone() { ExtBuilder::default() .session_per_era(5) - .session_length(10) + .period(10) .election_lookahead(3) .build() .execute_with(|| { @@ -3086,7 +3181,7 @@ mod offchain_election { .into_iter() .map(|r| r.event) .filter_map(|e| { - if let MetaEvent::staking(inner) = e { + if let mock::Event::staking(inner) = e { Some(inner) } else { None @@ -3171,7 +3266,7 @@ mod offchain_election { .into_iter() .map(|r| r.event) .filter_map(|e| { - if let MetaEvent::staking(inner) = e { + if let mock::Event::staking(inner) = e { Some(inner) } else { None @@ -3190,7 +3285,7 @@ mod offchain_election { .into_iter() .map(|r| r.event) .filter_map(|e| { - if let MetaEvent::staking(inner) = e { + if let mock::Event::staking(inner) = e { Some(inner) } else { None @@ -3227,7 +3322,7 @@ mod offchain_election { .into_iter() .map(|r| r.event) .filter_map(|e| { - if let MetaEvent::staking(inner) = e { + if let mock::Event::staking(inner) = e { Some(inner) } else { None @@ -3267,7 +3362,7 @@ mod offchain_election { ElectionSize::default(), ), Error::::OffchainElectionEarlySubmission, - Some(::DbWeight::get().reads(1)), + Some(::DbWeight::get().reads(1)), ); }) } @@ -3303,7 +3398,7 @@ mod offchain_election { score, ), Error::::OffchainElectionWeakSubmission, - Some(::DbWeight::get().reads(3)) + Some(::DbWeight::get().reads(3)) ); }) } @@ -3363,6 +3458,7 @@ mod offchain_election { let call = extrinsic.call; let inner = match call { mock::Call::Staking(inner) => inner, + _ => unreachable!(), }; assert_eq!( @@ -3406,6 +3502,7 @@ mod offchain_election { let call = extrinsic.call; let inner = match call { 
mock::Call::Staking(inner) => inner, + _ => unreachable!(), }; assert_eq!( @@ -3453,6 +3550,7 @@ mod offchain_election { let call = extrinsic.call; let inner = match call { mock::Call::Staking(inner) => inner, + _ => unreachable!(), }; // pass this call to ValidateUnsigned @@ -4101,7 +4199,7 @@ mod offchain_election { #[test] fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_validator() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21]); // pre-slash balance @@ -4149,7 +4247,7 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid // actually re-bond the slashed validator assert_ok!(Staking::validate(Origin::signed(10), Default::default())); - mock::start_era(2); + mock::start_active_era(2); let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); @@ -4180,31 +4278,28 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { >::reward_by_ids(vec![(11, 1)]); // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 10); // Test is meaningful if reward something + let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); - mock::start_era(1); + mock::start_active_era(1); >::reward_by_ids(vec![(11, 1)]); // Change total issuance in order to modify total payout let _ = Balances::deposit_creating(&999, 1_000_000_000); // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 10); // Test is meaningful if reward something + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); assert!(total_payout_1 != total_payout_0); - mock::start_era(2); + mock::start_active_era(2); >::reward_by_ids(vec![(11, 1)]); // Change total issuance in order to modify total payout let _ = Balances::deposit_creating(&999, 1_000_000_000); // Compute total payout now for whole duration as other parameter won't change - let total_payout_2 = current_total_payout_for_duration(3000); - assert!(total_payout_2 > 10); // Test is meaningful if reward something + let total_payout_2 = current_total_payout_for_duration(reward_time_per_era()); assert!(total_payout_2 != total_payout_0); assert!(total_payout_2 != total_payout_1); - mock::start_era(Staking::history_depth() + 1); + mock::start_active_era(Staking::history_depth() + 1); let active_era = Staking::active_era().unwrap().index; @@ -4248,7 +4343,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { #[test] fn zero_slash_keeps_nominators() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); assert_eq!(Balances::free_balance(11), 1000); @@ -4285,12 +4380,13 @@ fn zero_slash_keeps_nominators() { #[test] fn six_session_delay() { - ExtBuilder::default().build_and_execute(|| { + ExtBuilder::default().initialize_first_session(false).build_and_execute(|| { use pallet_session::SessionManager; let val_set = Session::validators(); let init_session = Session::current_index(); let init_active_era = Staking::active_era().unwrap().index; + // pallet-session is delaying session by one, thus the next session to plan is +2. 
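// Put differently: while `init_session` is still the active session, nothing has been
// planned for `init_session + 2` yet, and the first index for which a validator set is
// returned is `init_session + 3`, which the two assertions below check.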
assert_eq!(>::new_session(init_session + 2), None); assert_eq!(>::new_session(init_session + 3), Some(val_set.clone())); @@ -4300,10 +4396,11 @@ fn six_session_delay() { >::end_session(init_session); >::start_session(init_session + 1); - assert_eq!(Staking::active_era().unwrap().index, init_active_era); + assert_eq!(active_era(), init_active_era); + >::end_session(init_session + 1); >::start_session(init_session + 2); - assert_eq!(Staking::active_era().unwrap().index, init_active_era); + assert_eq!(active_era(), init_active_era); // Reward current era Staking::reward_by_ids(vec![(11, 1)]); @@ -4311,13 +4408,15 @@ fn six_session_delay() { // New active era is triggered here. >::end_session(init_session + 2); >::start_session(init_session + 3); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + assert_eq!(active_era(), init_active_era + 1); + >::end_session(init_session + 3); >::start_session(init_session + 4); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + assert_eq!(active_era(), init_active_era + 1); + >::end_session(init_session + 4); >::start_session(init_session + 5); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + assert_eq!(active_era(), init_active_era + 1); // Reward current era Staking::reward_by_ids(vec![(21, 2)]); @@ -4325,7 +4424,7 @@ fn six_session_delay() { // New active era is triggered here. >::end_session(init_session + 5); >::start_session(init_session + 6); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 2); + assert_eq!(active_era(), init_active_era + 2); // That reward are correct assert_eq!(Staking::eras_reward_points(init_active_era).total, 1); @@ -4335,12 +4434,8 @@ fn six_session_delay() { #[test] fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward() { - // Test: - // * If nominator nomination is below the $MaxNominatorRewardedPerValidator other nominator - // then the nominator can't claim its reward - // * A nominator can't claim another nominator reward ExtBuilder::default().build_and_execute(|| { - for i in 0..=::MaxNominatorRewardedPerValidator::get() { + for i in 0..=::MaxNominatorRewardedPerValidator::get() { let stash = 10_000 + i as AccountId; let controller = 20_000 + i as AccountId; let balance = 10_000 + i as Balance; @@ -4355,18 +4450,17 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( ); assert_ok!(Staking::nominate(Origin::signed(controller), vec![11])); } - mock::start_era(1); + mock::start_active_era(1); >::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something + // compute and ensure the reward amount is greater than zero. 
+ let _ = current_total_payout_for_duration(reward_time_per_era()); - mock::start_era(2); + mock::start_active_era(2); mock::make_all_reward_payment(1); // Assert only nominators from 1 to Max are rewarded - for i in 0..=::MaxNominatorRewardedPerValidator::get() { + for i in 0..=::MaxNominatorRewardedPerValidator::get() { let stash = 10_000 + i as AccountId; let balance = 10_000 + i as Balance; if stash == 10_000 { @@ -4381,7 +4475,7 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( #[test] fn set_history_depth_works() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(10); + mock::start_active_era(10); Staking::set_history_depth(Origin::root(), 20, 0).unwrap(); assert!(::ErasTotalStake::contains_key(10 - 4)); assert!(::ErasTotalStake::contains_key(10 - 5)); @@ -4411,12 +4505,13 @@ fn test_payout_stakers() { bond_nominator(1000 + i, 100 + i, balance + i as Balance, vec![11]); } - mock::start_era(1); + mock::start_active_era(1); Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(2); + + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + + mock::start_active_era(2); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); // Top 64 nominators of validator 11 automatically paid out, including the validator @@ -4438,10 +4533,11 @@ fn test_payout_stakers() { for i in 3..16 { Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(i); + + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + + mock::start_active_era(i); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, i - 1)); } @@ -4453,10 +4549,9 @@ fn test_payout_stakers() { for i in 16..100 { Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(i); + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(i); } // We clean it up as history passes @@ -4491,12 +4586,13 @@ fn payout_stakers_handles_basic_errors() { bond_nominator(1000 + i, 100 + i, balance + i as Balance, vec![11]); } - mock::start_era(1); + mock::start_active_era(1); Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(2); + + // compute and ensure the reward amount is greater than zero. 
+ let _ = current_total_payout_for_duration(reward_time_per_era()); + + mock::start_active_era(2); // Wrong Era, too big assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 2), Error::::InvalidEraToReward); @@ -4505,10 +4601,9 @@ fn payout_stakers_handles_basic_errors() { for i in 3..100 { Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(i); + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(i); } // We are at era 99, with history depth of 84 // We should be able to payout era 15 through 98 (84 total eras), but not 14 or 99. @@ -4538,7 +4633,7 @@ fn bond_during_era_correctly_populates_claimed_rewards() { claimed_rewards: vec![], }) ); - mock::start_era(5); + mock::start_active_era(5); bond_validator(11, 10, 1000); assert_eq!( Staking::ledger(&10), @@ -4550,7 +4645,7 @@ fn bond_during_era_correctly_populates_claimed_rewards() { claimed_rewards: (0..5).collect(), }) ); - mock::start_era(99); + mock::start_active_era(99); bond_validator(13, 12, 1000); assert_eq!( Staking::ledger(&12), @@ -4569,14 +4664,14 @@ fn bond_during_era_correctly_populates_claimed_rewards() { fn offences_weight_calculated_correctly() { ExtBuilder::default().nominate(true).build_and_execute(|| { // On offence with zero offenders: 4 Reads, 1 Write - let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); + let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); assert_eq!(Staking::on_offence(&[], &[Perbill::from_percent(50)], 0), Ok(zero_offence_weight)); // On Offence with N offenders, Unapplied: 4 Reads, 1 Write + 4 Reads, 5 Writes - let n_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) - + ::DbWeight::get().reads_writes(4, 5); + let n_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) + + ::DbWeight::get().reads_writes(4, 5); - let offenders: Vec::AccountId, pallet_session::historical::IdentificationTuple>> + let offenders: Vec::AccountId, pallet_session::historical::IdentificationTuple>> = (1..10).map(|i| OffenceDetails { offender: (i, Staking::eras_stakers(Staking::active_era().unwrap().index, i)), @@ -4595,14 +4690,14 @@ fn offences_weight_calculated_correctly() { let n = 1; // Number of offenders let rw = 3 + 3 * n; // rw reads and writes - let one_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) - + ::DbWeight::get().reads_writes(rw, rw) + let one_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) + + ::DbWeight::get().reads_writes(rw, rw) // One `slash_cost` - + ::DbWeight::get().reads_writes(6, 5) + + ::DbWeight::get().reads_writes(6, 5) // `slash_cost` * nominators (1) - + ::DbWeight::get().reads_writes(6, 5) + + ::DbWeight::get().reads_writes(6, 5) // `reward_cost` * reporters (1) - + ::DbWeight::get().reads_writes(2, 2); + + ::DbWeight::get().reads_writes(2, 2); assert_eq!(Staking::on_offence(&one_offender, &[Perbill::from_percent(50)], 0), Ok(one_offence_unapplied_weight)); }); @@ -4614,7 +4709,7 @@ fn on_initialize_weight_is_correct() { assert_eq!(Validators::::iter().count(), 0); assert_eq!(Nominators::::iter().count(), 0); // When this pallet has nothing, we do 4 reads each block - let base_weight = ::DbWeight::get().reads(4); + let base_weight = ::DbWeight::get().reads(4); 
assert_eq!(base_weight, Staking::on_initialize(0)); }); @@ -4636,7 +4731,7 @@ fn on_initialize_weight_is_correct() { // With 4 validators and 5 nominator, we should increase weight by: // - (4 + 5) reads // - 3 Writes - let final_weight = ::DbWeight::get().reads_writes(4 + 9, 3); + let final_weight = ::DbWeight::get().reads_writes(4 + 9, 3); assert_eq!(final_weight, Staking::on_initialize(System::block_number())); }); } @@ -4655,12 +4750,11 @@ fn payout_creates_controller() { assert_ok!(Balances::transfer(Origin::signed(1337), 1234, 100)); assert_eq!(Balances::free_balance(1337), 0); - mock::start_era(1); + mock::start_active_era(1); Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(2); + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(2); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); // Controller is created @@ -4684,15 +4778,203 @@ fn payout_to_any_account_works() { // Reward Destination account doesn't exist assert_eq!(Balances::free_balance(42), 0); - mock::start_era(1); + mock::start_active_era(1); Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(2); + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(2); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); // Payment is successful assert!(Balances::free_balance(42) > 0); }) } + +#[test] +fn session_buffering_with_offset() { + // similar to live-chains, have some offset for the first session + ExtBuilder::default() + .offset(2) + .period(5) + .session_per_era(5) + .build_and_execute(|| { + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 0); + + start_session(1); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 1); + assert_eq!(System::block_number(), 2); + + start_session(2); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 2); + assert_eq!(System::block_number(), 7); + + start_session(3); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 3); + assert_eq!(System::block_number(), 12); + + // active era is lagging behind by one session, because of how session module works. + start_session(4); + assert_eq!(current_era(), 1); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 4); + assert_eq!(System::block_number(), 17); + + start_session(5); + assert_eq!(current_era(), 1); + assert_eq!(active_era(), 1); + assert_eq!(Session::current_index(), 5); + assert_eq!(System::block_number(), 22); + + // go all the way to active 2. 
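// (With `offset(2)` and `period(5)`, session `i` begins at block `2 + 5 * (i - 1)`, matching
// the block numbers asserted above, and each era becomes active one session after it becomes
// current.)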
+ start_active_era(2); + assert_eq!(current_era(), 2); + assert_eq!(active_era(), 2); + assert_eq!(Session::current_index(), 10); + + }); +} + +#[test] +fn session_buffering_no_offset() { + // no offset, first session starts immediately + ExtBuilder::default() + .offset(0) + .period(5) + .session_per_era(5) + .build_and_execute(|| { + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 0); + + start_session(1); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 1); + assert_eq!(System::block_number(), 5); + + start_session(2); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 2); + assert_eq!(System::block_number(), 10); + + start_session(3); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 3); + assert_eq!(System::block_number(), 15); + + // active era is lagging behind by one session, because of how session module works. + start_session(4); + assert_eq!(current_era(), 1); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 4); + assert_eq!(System::block_number(), 20); + + start_session(5); + assert_eq!(current_era(), 1); + assert_eq!(active_era(), 1); + assert_eq!(Session::current_index(), 5); + assert_eq!(System::block_number(), 25); + + // go all the way to active 2. + start_active_era(2); + assert_eq!(current_era(), 2); + assert_eq!(active_era(), 2); + assert_eq!(Session::current_index(), 10); + + }); +} + +#[test] +fn cannot_rebond_to_lower_than_ed() { + ExtBuilder::default() + .existential_deposit(10) + .build_and_execute(|| { + // stash must have more balance than bonded for this to work. + assert_eq!(Balances::free_balance(&21), 512_000); + + // initial stuff. + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![] + } + ); + + // unbond all of it. + assert_ok!(Staking::unbond(Origin::signed(20), 1000)); + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 1000, + active: 0, + unlocking: vec![UnlockChunk { value: 1000, era: 3 }], + claimed_rewards: vec![] + } + ); + + // now bond a wee bit more + assert_noop!( + Staking::rebond(Origin::signed(20), 5), + Error::::InsufficientValue, + ); + }) +} + +#[test] +fn cannot_bond_extra_to_lower_than_ed() { + ExtBuilder::default() + .existential_deposit(10) + .build_and_execute(|| { + // stash must have more balance than bonded for this to work. + assert_eq!(Balances::free_balance(&21), 512_000); + + // initial stuff. + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![] + } + ); + + // unbond all of it. + assert_ok!(Staking::unbond(Origin::signed(20), 1000)); + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 1000, + active: 0, + unlocking: vec![UnlockChunk { + value: 1000, + era: 3 + }], + claimed_rewards: vec![] + } + ); + + // now bond a wee bit more + assert_noop!( + Staking::bond_extra(Origin::signed(21), 5), + Error::::InsufficientValue, + ); + }) +} diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index cb301276e0f05..b70563ccf41b3 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
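Editor's aside: both new tests assert that an operation leaving the active bond below the existential deposit is rejected with `InsufficientValue`. A minimal, dependency-free sketch of that guard; the names and structure here are mine, not the pallet's internals:

```rust
// Minimal model of the "active stake must not fall below the existential deposit"
// rule exercised by `cannot_rebond_to_lower_than_ed` / `cannot_bond_extra_to_lower_than_ed`.
#[derive(Debug, PartialEq)]
enum Error {
    InsufficientValue,
}

struct Ledger {
    total: u64,
    active: u64,
}

const EXISTENTIAL_DEPOSIT: u64 = 10;

fn rebond(ledger: &mut Ledger, value: u64) -> Result<(), Error> {
    let unlocking = ledger.total - ledger.active;
    let rebonded = value.min(unlocking);
    // Reject if the resulting active stake would still be below the ED.
    if ledger.active + rebonded < EXISTENTIAL_DEPOSIT {
        return Err(Error::InsufficientValue);
    }
    ledger.active += rebonded;
    Ok(())
}

fn main() {
    // Fully unbonded ledger, as in the tests: total 1000, active 0, ED 10.
    let mut ledger = Ledger { total: 1000, active: 0 };
    // Rebonding only 5 would leave active (5) below the ED (10) -> rejected.
    assert_eq!(rebond(&mut ledger, 5), Err(Error::InsufficientValue));
    assert_eq!(ledger.active, 0);
}
```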
+// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_staking -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_staking +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.1 +//! DATE: 2021-01-19, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -46,10 +47,11 @@ pub trait WeightInfo { fn bond() -> Weight; fn bond_extra() -> Weight; fn unbond() -> Weight; - fn withdraw_unbonded_update(_s: u32, ) -> Weight; - fn withdraw_unbonded_kill(_s: u32, ) -> Weight; + fn withdraw_unbonded_update(s: u32, ) -> Weight; + fn withdraw_unbonded_kill(s: u32, ) -> Weight; fn validate() -> Weight; - fn nominate(_n: u32, ) -> Weight; + fn kick(k: u32, ) -> Weight; + fn nominate(n: u32, ) -> Weight; fn chill() -> Weight; fn set_payee() -> Weight; fn set_controller() -> Weight; @@ -57,167 +59,172 @@ pub trait WeightInfo { fn force_no_eras() -> Weight; fn force_new_era() -> Weight; fn force_new_era_always() -> Weight; - fn set_invulnerables(_v: u32, ) -> Weight; - fn force_unstake(_s: u32, ) -> Weight; - fn cancel_deferred_slash(_s: u32, ) -> Weight; - fn payout_stakers_dead_controller(_n: u32, ) -> Weight; - fn payout_stakers_alive_staked(_n: u32, ) -> Weight; - fn rebond(_l: u32, ) -> Weight; - fn set_history_depth(_e: u32, ) -> Weight; - fn reap_stash(_s: u32, ) -> Weight; - fn new_era(_v: u32, _n: u32, ) -> Weight; - fn submit_solution_better(_v: u32, _n: u32, _a: u32, _w: u32, ) -> Weight; - + fn set_invulnerables(v: u32, ) -> Weight; + fn force_unstake(s: u32, ) -> Weight; + fn cancel_deferred_slash(s: u32, ) -> Weight; + fn payout_stakers_dead_controller(n: u32, ) -> Weight; + fn payout_stakers_alive_staked(n: u32, ) -> Weight; + fn rebond(l: u32, ) -> Weight; + fn set_history_depth(e: u32, ) -> Weight; + fn reap_stash(s: u32, ) -> Weight; + fn new_era(v: u32, n: u32, ) -> Weight; + fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight; } /// Weights for pallet_staking using the Substrate node and recommended hardware. 
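Editor's aside: the regenerated `WeightInfo` trait now names its parameters (`s`, `k`, `n`, …) and gains a `kick(k)` entry. Each generated function is a base term plus linear components plus DB reads/writes. A rough standalone model of how such a formula is evaluated, using the `kick` constants from the generated code below and assumed per-read/per-write costs in place of the runtime's `DbWeight`:

```rust
// Standalone model of evaluating a benchmarked weight formula.
// Base and per-item constants come from the generated `kick(k)` entry below;
// READ and WRITE are assumed stand-ins for the runtime's configured DbWeight.
const READ: u64 = 25_000_000;
const WRITE: u64 = 100_000_000;

fn kick_weight(k: u64) -> u64 {
    31_066_000
        .saturating_add(17_754_000u64.saturating_mul(k)) // linear per kicked nominator
        .saturating_add(2 * READ)                        // fixed reads
        .saturating_add(k * READ)                        // one extra read per nominator
        .saturating_add(k * WRITE)                        // one write per nominator
}

fn main() {
    // Kicking three nominators under these assumed DB costs:
    println!("kick(3) ~= {} weight units", kick_weight(3));
}
```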
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn bond() -> Weight { - (99_659_000 as Weight) + (76_281_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } fn bond_extra() -> Weight { - (79_045_000 as Weight) + (62_062_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn unbond() -> Weight { - (71_716_000 as Weight) + (57_195_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (72_835_000 as Weight) - .saturating_add((63_000 as Weight).saturating_mul(s as Weight)) + (58_043_000 as Weight) + // Standard Error: 1_000 + .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (118_239_000 as Weight) - .saturating_add((3_910_000 as Weight).saturating_mul(s as Weight)) + (89_920_000 as Weight) + // Standard Error: 3_000 + .saturating_add((2_526_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (25_691_000 as Weight) + (20_228_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - + } + fn kick(k: u32, ) -> Weight { + (31_066_000 as Weight) + // Standard Error: 11_000 + .saturating_add((17_754_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (35_374_000 as Weight) - .saturating_add((203_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) + (33_494_000 as Weight) + // Standard Error: 23_000 + .saturating_add((5_253_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn chill() -> Weight { - (25_227_000 as Weight) + (19_396_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn set_payee() -> Weight { - (17_601_000 as Weight) + (13_449_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn set_controller() -> Weight { - (37_514_000 as Weight) + (29_184_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn set_validator_count() -> Weight { - (3_338_000 as Weight) + (2_266_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn force_no_eras() -> Weight { - (3_869_000 as Weight) + (2_462_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn force_new_era() -> Weight { - (3_795_000 as Weight) + (2_483_000 as 
Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn force_new_era_always() -> Weight { - (3_829_000 as Weight) + (2_495_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn set_invulnerables(v: u32, ) -> Weight { - (4_087_000 as Weight) + (2_712_000 as Weight) + // Standard Error: 0 .saturating_add((9_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn force_unstake(s: u32, ) -> Weight { - (81_063_000 as Weight) - .saturating_add((3_872_000 as Weight).saturating_mul(s as Weight)) + (60_508_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_525_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_840_640_000 as Weight) - .saturating_add((34_806_000 as Weight).saturating_mul(s as Weight)) + (5_886_772_000 as Weight) + // Standard Error: 393_000 + .saturating_add((34_849_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (153_024_000 as Weight) - .saturating_add((59_909_000 as Weight).saturating_mul(n as Weight)) + (127_627_000 as Weight) + // Standard Error: 27_000 + .saturating_add((49_354_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (196_058_000 as Weight) - .saturating_add((78_955_000 as Weight).saturating_mul(n as Weight)) + (156_838_000 as Weight) + // Standard Error: 24_000 + .saturating_add((62_653_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(12 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (49_966_000 as Weight) - .saturating_add((92_000 as Weight).saturating_mul(l as Weight)) + (40_110_000 as Weight) + // Standard Error: 1_000 + .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - .saturating_add((38_529_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 70_000 + .saturating_add((32_883_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (101_457_000 as Weight) - .saturating_add((3_914_000 as Weight).saturating_mul(s as Weight)) + (64_605_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_506_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) 
.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - .saturating_add((948_467_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((117_579_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(10 as Weight)) + // Standard Error: 926_000 + .saturating_add((548_212_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 46_000 + .saturating_add((78_343_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(8 as Weight)) @@ -225,166 +232,174 @@ impl WeightInfo for SubstrateWeight { } fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { (0 as Weight) - .saturating_add((1_728_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((907_000 as Weight).saturating_mul(n as Weight)) - .saturating_add((99_762_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((9_017_000 as Weight).saturating_mul(w as Weight)) + // Standard Error: 48_000 + .saturating_add((937_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 19_000 + .saturating_add((657_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 48_000 + .saturating_add((70_669_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 101_000 + .saturating_add((7_658_000 as Weight).saturating_mul(w as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn bond() -> Weight { - (99_659_000 as Weight) + (76_281_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - } fn bond_extra() -> Weight { - (79_045_000 as Weight) + (62_062_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn unbond() -> Weight { - (71_716_000 as Weight) + (57_195_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (72_835_000 as Weight) - .saturating_add((63_000 as Weight).saturating_mul(s as Weight)) + (58_043_000 as Weight) + // Standard Error: 1_000 + .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (118_239_000 as Weight) - .saturating_add((3_910_000 as Weight).saturating_mul(s as Weight)) + (89_920_000 as Weight) + // Standard Error: 3_000 + .saturating_add((2_526_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (25_691_000 as Weight) + (20_228_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) 
.saturating_add(RocksDbWeight::get().writes(2 as Weight)) - + } + fn kick(k: u32, ) -> Weight { + (31_066_000 as Weight) + // Standard Error: 11_000 + .saturating_add((17_754_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (35_374_000 as Weight) - .saturating_add((203_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + (33_494_000 as Weight) + // Standard Error: 23_000 + .saturating_add((5_253_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn chill() -> Weight { - (25_227_000 as Weight) + (19_396_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn set_payee() -> Weight { - (17_601_000 as Weight) + (13_449_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn set_controller() -> Weight { - (37_514_000 as Weight) + (29_184_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn set_validator_count() -> Weight { - (3_338_000 as Weight) + (2_266_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn force_no_eras() -> Weight { - (3_869_000 as Weight) + (2_462_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn force_new_era() -> Weight { - (3_795_000 as Weight) + (2_483_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn force_new_era_always() -> Weight { - (3_829_000 as Weight) + (2_495_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn set_invulnerables(v: u32, ) -> Weight { - (4_087_000 as Weight) + (2_712_000 as Weight) + // Standard Error: 0 .saturating_add((9_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn force_unstake(s: u32, ) -> Weight { - (81_063_000 as Weight) - .saturating_add((3_872_000 as Weight).saturating_mul(s as Weight)) + (60_508_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_525_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_840_640_000 as Weight) - .saturating_add((34_806_000 as Weight).saturating_mul(s as Weight)) + (5_886_772_000 as Weight) + // Standard Error: 393_000 + .saturating_add((34_849_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (153_024_000 as Weight) - .saturating_add((59_909_000 as Weight).saturating_mul(n as Weight)) + (127_627_000 as Weight) + // Standard Error: 27_000 + .saturating_add((49_354_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(11 
as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (196_058_000 as Weight) - .saturating_add((78_955_000 as Weight).saturating_mul(n as Weight)) + (156_838_000 as Weight) + // Standard Error: 24_000 + .saturating_add((62_653_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(12 as Weight)) .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (49_966_000 as Weight) - .saturating_add((92_000 as Weight).saturating_mul(l as Weight)) + (40_110_000 as Weight) + // Standard Error: 1_000 + .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - .saturating_add((38_529_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 70_000 + .saturating_add((32_883_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (101_457_000 as Weight) - .saturating_add((3_914_000 as Weight).saturating_mul(s as Weight)) + (64_605_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_506_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - .saturating_add((948_467_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((117_579_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(10 as Weight)) + // Standard Error: 926_000 + .saturating_add((548_212_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 46_000 + .saturating_add((78_343_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) @@ -392,15 +407,17 @@ impl WeightInfo for () { } fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { (0 as Weight) - .saturating_add((1_728_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((907_000 as Weight).saturating_mul(n as Weight)) - .saturating_add((99_762_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((9_017_000 as Weight).saturating_mul(w as Weight)) + // Standard Error: 48_000 + .saturating_add((937_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 19_000 + .saturating_add((657_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 48_000 + .saturating_add((70_669_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 101_000 + 
.saturating_add((7_658_000 as Weight).saturating_mul(w as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } - } diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index 4713baea518f9..ed19d2e16535a 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-sudo" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/sudo/README.md b/frame/sudo/README.md index 233727ac1bd28..95ca7ce88d972 100644 --- a/frame/sudo/README.md +++ b/frame/sudo/README.md @@ -38,10 +38,10 @@ This is an example of a module that exposes a privileged function: use frame_support::{decl_module, dispatch}; use frame_system::ensure_root; -pub trait Trait: frame_system::Trait {} +pub trait Config: frame_system::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn privileged_function(origin) -> dispatch::DispatchResult { ensure_root(origin)?; @@ -64,7 +64,7 @@ You need to set an initial superuser account as the sudo `key`. * [Democracy](https://docs.rs/pallet-democracy/latest/pallet_democracy/) [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html [`Origin`]: https://docs.substrate.dev/docs/substrate-types -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 0d21e44326668..c7cc38a81c134 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ //! 
# Sudo Module //! -//! - [`sudo::Trait`](./trait.Trait.html) +//! - [`sudo::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -55,10 +55,10 @@ //! use frame_support::{decl_module, dispatch}; //! use frame_system::ensure_root; //! -//! pub trait Trait: frame_system::Trait {} +//! pub trait Config: frame_system::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn privileged_function(origin) -> dispatch::DispatchResult { //! ensure_root(origin)?; @@ -82,7 +82,7 @@ //! * [Democracy](../pallet_democracy/index.html) //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html //! [`Origin`]: https://docs.substrate.dev/docs/substrate-types #![cfg_attr(not(feature = "std"), no_std)] @@ -105,9 +105,9 @@ mod mock; #[cfg(test)] mod tests; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// A sudo-able call. type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; @@ -115,7 +115,7 @@ pub trait Trait: frame_system::Trait { decl_module! { /// Sudo module declaration. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -130,8 +130,11 @@ decl_module! { /// - One DB write (event). /// - Weight of derivative `call` execution + 10,000. /// # - #[weight = (call.get_dispatch_info().weight + 10_000, call.get_dispatch_info().class)] - fn sudo(origin, call: Box<::Call>) -> DispatchResultWithPostInfo { + #[weight = { + let dispatch_info = call.get_dispatch_info(); + (dispatch_info.weight.saturating_add(10_000), dispatch_info.class) + }] + fn sudo(origin, call: Box<::Call>) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); @@ -153,7 +156,7 @@ decl_module! { /// - The weight of this call is defined by the caller. /// # #[weight = (*_weight, call.get_dispatch_info().class)] - fn sudo_unchecked_weight(origin, call: Box<::Call>, _weight: Weight) -> DispatchResultWithPostInfo { + fn sudo_unchecked_weight(origin, call: Box<::Call>, _weight: Weight) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); @@ -197,16 +200,19 @@ decl_module! { /// - One DB write (event). /// - Weight of derivative `call` execution + 10,000. /// # - #[weight = ( - call.get_dispatch_info().weight - .saturating_add(10_000) - // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)), - call.get_dispatch_info().class - )] + #[weight = { + let dispatch_info = call.get_dispatch_info(); + ( + dispatch_info.weight + .saturating_add(10_000) + // AccountData for inner call origin accountdata. + .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + dispatch_info.class, + ) + }] fn sudo_as(origin, who: ::Source, - call: Box<::Call> + call: Box<::Call> ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; @@ -214,15 +220,9 @@ decl_module! 
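Editor's aside: the reworked `#[weight]` blocks in sudo fetch the inner call's dispatch info once and derive the outer weight from it. A simplified, dependency-free model of that computation; the types are local stand-ins for `frame_support::dispatch`'s:

```rust
// Simplified stand-ins for frame_support's DispatchInfo / DispatchClass.
#[derive(Clone, Copy, Debug, PartialEq)]
enum DispatchClass {
    Normal,
    Operational,
}

#[derive(Clone, Copy, Debug)]
struct DispatchInfo {
    weight: u64,
    class: DispatchClass,
}

/// Weight of `sudo(call)`: the inner call's weight plus a small fixed overhead,
/// dispatched in the inner call's class.
fn sudo_weight(inner: DispatchInfo) -> (u64, DispatchClass) {
    (inner.weight.saturating_add(10_000), inner.class)
}

/// Weight of `sudo_as(who, call)`: additionally accounts for touching the inner
/// origin's AccountData, modelled here as one read plus one write.
fn sudo_as_weight(inner: DispatchInfo, read: u64, write: u64) -> (u64, DispatchClass) {
    (
        inner.weight.saturating_add(10_000).saturating_add(read + write),
        inner.class,
    )
}

fn main() {
    let inner = DispatchInfo { weight: 1_000_000, class: DispatchClass::Normal };
    assert_eq!(sudo_weight(inner), (1_010_000, DispatchClass::Normal));
    println!("sudo_as weight: {:?}", sudo_as_weight(inner, 25_000_000, 100_000_000));
    let _ = DispatchClass::Operational; // listed only to mirror the real enum
}
```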
{ let who = T::Lookup::lookup(who)?; - let res = match call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into()) { - Ok(_) => true, - Err(e) => { - sp_runtime::print(e); - false - } - }; + let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into()); - Self::deposit_event(RawEvent::SudoAsDone(res)); + Self::deposit_event(RawEvent::SudoAsDone(res.map(|_| ()).map_err(|e| e.error))); // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -230,18 +230,18 @@ decl_module! { } decl_event!( - pub enum Event where AccountId = ::AccountId { + pub enum Event where AccountId = ::AccountId { /// A sudo just took place. \[result\] Sudid(DispatchResult), /// The \[sudoer\] just switched identity; the old key is supplied. KeyChanged(AccountId), /// A sudo just took place. \[result\] - SudoAsDone(bool), + SudoAsDone(DispatchResult), } ); decl_storage! { - trait Store for Module as Sudo { + trait Store for Module as Sudo { /// The `AccountId` of the sudo key. Key get(fn key) config(): T::AccountId; } @@ -249,7 +249,7 @@ decl_storage! { decl_error! { /// Error for the Sudo module - pub enum Error for Module { + pub enum Error for Module { /// Sender must be the Sudo account RequireSudo, } diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 7996cd05d071f..91cd03ac4756a 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,41 +18,39 @@ //! Test utilities use super::*; -use frame_support::{ - impl_outer_origin, impl_outer_dispatch, impl_outer_event, parameter_types, - weights::Weight, -}; +use frame_support::{parameter_types, weights::Weight}; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use sp_io; use crate as sudo; use frame_support::traits::Filter; +use frame_system::limits; // Logger module to track execution. pub mod logger { use super::*; use frame_system::ensure_root; - pub trait Trait: frame_system::Trait { - type Event: From> + Into<::Event>; + pub trait Config: frame_system::Config { + type Event: From> + Into<::Event>; } decl_storage! { - trait Store for Module as Logger { + trait Store for Module as Logger { AccountLog get(fn account_log): Vec; I32Log get(fn i32_log): Vec; } } decl_event! { - pub enum Event where AccountId = ::AccountId { + pub enum Event where AccountId = ::AccountId { AppendI32(i32, Weight), AppendI32AndAccount(AccountId, i32, Weight), } } decl_module! { - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { fn deposit_event() = default; #[weight = *weight] @@ -75,40 +73,24 @@ pub mod logger { } } -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} - -mod test_events { - pub use crate::Event; -} - -impl_outer_event! { - pub enum TestEvent for Test { - frame_system, - sudo, - logger, - } -} - -impl_outer_dispatch! 
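Editor's aside: `SudoAsDone` now carries the inner call's `DispatchResult` instead of a bare `bool`. In isolation, the mapping from the dispatch outcome to the event payload looks like this (simplified local types, not the real frame ones):

```rust
// Simplified local stand-ins for the dispatch result types.
#[derive(Debug, PartialEq)]
enum DispatchError {
    BadOrigin,
}

struct PostDispatchInfo; // details irrelevant here

struct DispatchErrorWithPostInfo {
    post_info: PostDispatchInfo,
    error: DispatchError,
}

type DispatchResultWithPostInfo = Result<PostDispatchInfo, DispatchErrorWithPostInfo>;
type DispatchResult = Result<(), DispatchError>;

/// What the reworked `sudo_as` does with the inner dispatch outcome:
/// strip the post-dispatch info and keep only success or the bare error.
fn to_event_payload(res: DispatchResultWithPostInfo) -> DispatchResult {
    res.map(|_| ()).map_err(|e| e.error)
}

fn main() {
    let ok: DispatchResultWithPostInfo = Ok(PostDispatchInfo);
    assert_eq!(to_event_payload(ok), Ok(()));

    let err: DispatchResultWithPostInfo = Err(DispatchErrorWithPostInfo {
        post_info: PostDispatchInfo,
        error: DispatchError::BadOrigin,
    });
    assert_eq!(to_event_payload(err), Err(DispatchError::BadOrigin));
}
```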
{ - pub enum Call for Test where origin: Origin { - sudo::Sudo, - logger::Logger, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Sudo: sudo::{Module, Call, Config, Storage, Event}, + Logger: logger::{Module, Call, Storage, Event}, } -} - -// For testing the pallet, we construct most of a mock runtime. This means -// first constructing a configuration type (`Test`) which `impl`s each of the -// configuration traits of pallets we want to use. -#[derive(Clone, Eq, PartialEq)] -pub struct Test; +); parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::simple_max(1024); } pub struct BlockEverything; @@ -118,8 +100,11 @@ impl Filter for BlockEverything { } } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = BlockEverything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -129,39 +114,28 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } -// Implement the logger module's `Trait` on the Test runtime. -impl logger::Trait for Test { - type Event = TestEvent; +// Implement the logger module's `Config` on the Test runtime. +impl logger::Config for Test { + type Event = Event; } -// Implement the sudo module's `Trait` on the Test runtime. -impl Trait for Test { - type Event = TestEvent; +// Implement the sudo module's `Config` on the Test runtime. +impl Config for Test { + type Event = Event; type Call = Call; } -// Assign back to type variables in order to make dispatched calls of these modules later. -pub type Sudo = Module; -pub type Logger = logger::Module; -pub type System = frame_system::Module; - // New types for dispatchable functions. pub type SudoCall = sudo::Call; pub type LoggerCall = logger::Call; @@ -169,7 +143,7 @@ pub type LoggerCall = logger::Call; // Build test environment by setting the root `key` for the Genesis. pub fn new_test_ext(root_key: u64) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig::{ + sudo::GenesisConfig::{ key: root_key, }.assimilate_storage(&mut t).unwrap(); t.into() diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index cba1e1cf60540..4d2552b7b88b4 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,8 @@ use super::*; use mock::{ - Sudo, SudoCall, Origin, Call, Test, new_test_ext, LoggerCall, Logger, System, TestEvent, + Sudo, SudoCall, Origin, Call, Test, new_test_ext, LoggerCall, Logger, System, + Event as TestEvent, }; use frame_support::{assert_ok, assert_noop}; @@ -163,7 +164,7 @@ fn sudo_as_emits_events_correctly() { // A non-privileged function will work when passed to `sudo_as` with the root `key`. let call = Box::new(Call::Logger(LoggerCall::non_privileged_log(42, 1))); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); - let expected_event = TestEvent::sudo(RawEvent::SudoAsDone(true)); + let expected_event = TestEvent::sudo(RawEvent::SudoAsDone(Ok(()))); assert!(System::events().iter().any(|a| a.event == expected_event)); }); } diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 1f7fe9a202538..8edf1ff6ddadc 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,29 +15,30 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4" serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -frame-metadata = { version = "12.0.0", default-features = false, path = "../metadata" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0", default-features = false, path = "../../primitives/tracing" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-arithmetic = { version = "2.0.0", default-features = false, path = "../../primitives/arithmetic" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -frame-support-procedural = { version = "2.0.0", default-features = false, path = "./procedural" } -paste = "0.1.6" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +frame-metadata = { version = "13.0.0", default-features = false, path = "../metadata" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-tracing = { version = "3.0.0", default-features = false, path = "../../primitives/tracing" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-arithmetic = { version = "3.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-inherents = { version = "3.0.0", default-features = false, path = "../../primitives/inherents" } +sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } +frame-support-procedural = { version = "3.0.0", default-features = false, path = "./procedural" } +paste = "1.0" once_cell = { version = "1", default-features = false, optional = true } 
-sp-state-machine = { version = "0.8.0", optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.9.0", optional = true, path = "../../primitives/state-machine" } bitflags = "1.2" -impl-trait-for-tuples = "0.1.3" +impl-trait-for-tuples = "0.2.1" smallvec = "1.4.1" [dev-dependencies] pretty_assertions = "0.6.1" -frame-system = { version = "2.0.0", path = "../system" } -parity-util-mem = { version = "0.7.0", features = ["primitive-types"] } +frame-system = { version = "3.0.0", path = "../system" } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -sp-api = { version = "2.0.0", default-features = false, path = "../../primitives/api" } +sp-api = { version = "3.0.0", default-features = false, path = "../../primitives/api" } [features] default = ["std"] @@ -52,6 +53,7 @@ std = [ "sp-arithmetic/std", "frame-metadata/std", "sp-inherents/std", + "sp-staking/std", "sp-state-machine", "frame-support-procedural/std", ] diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 70662d710775d..4a00a24e3849d 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,10 +15,11 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -frame-support-procedural-tools = { version = "2.0.0", path = "./tools" } +frame-support-procedural-tools = { version = "3.0.0", path = "./tools" } proc-macro2 = "1.0.6" quote = "1.0.3" -syn = { version = "1.0.7", features = ["full"] } +Inflector = "0.11.4" +syn = { version = "1.0.58", features = ["full"] } [features] default = ["std"] diff --git a/frame/support/procedural/src/clone_no_bound.rs b/frame/support/procedural/src/clone_no_bound.rs index 35854d23f4dbd..1911fdfd9fb29 100644 --- a/frame/support/procedural/src/clone_no_bound.rs +++ b/frame/support/procedural/src/clone_no_bound.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 15f0935f38233..abd68e4425d89 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -62,6 +62,7 @@ impl Module { fn complete_modules(decl: impl Iterator) -> syn::Result> { let mut indices = HashMap::new(); let mut last_index: Option = None; + let mut names = HashMap::new(); decl .map(|module| { @@ -88,6 +89,14 @@ fn complete_modules(decl: impl Iterator) -> syn::Resul return Err(err); } + if let Some(used_module) = names.insert(module.name.clone(), module.name.span()) { + let msg = "Two modules with the same name!"; + + let mut err = syn::Error::new(used_module, &msg); + err.combine(syn::Error::new(module.name.span(), &msg)); + return Err(err); + } + Ok(Module { name: module.name, index: final_index, @@ -205,7 +214,7 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result( @@ -299,7 +308,7 @@ fn decl_runtime_metadata<'a>( module_declaration.find_part("Module").map(|_| { let filtered_names: Vec<_> = module_declaration .module_parts() - .into_iter() + .iter() .filter(|part| part.name() != "Module") .map(|part| part.ident()) .collect(); @@ -339,7 +348,7 @@ fn decl_outer_dispatch<'a>( .map(|module_declaration| { let module = &module_declaration.module; let name = &module_declaration.name; - let index = module_declaration.index.to_string(); + let index = module_declaration.index; quote!(#[codec(index = #index)] #module::#name) }); @@ -360,29 +369,26 @@ fn decl_outer_origin<'a>( ) -> syn::Result { let mut modules_tokens = TokenStream2::new(); for module_declaration in modules_except_system { - match module_declaration.find_part("Origin") { - Some(module_entry) => { - let module = &module_declaration.module; - let instance = module_declaration.instance.as_ref(); - let generics = &module_entry.generics; - if instance.is_some() && generics.params.len() == 0 { - let msg = format!( - "Instantiable module with no generic `Origin` cannot \ - be constructed: module `{}` must have generic `Origin`", - module_declaration.name - ); - return Err(syn::Error::new(module_declaration.name.span(), msg)); - } - let index = module_declaration.index.to_string(); - let tokens = quote!(#[codec(index = #index)] #module #instance #generics,); - modules_tokens.extend(tokens); + if let Some(module_entry) = module_declaration.find_part("Origin") { + let module = &module_declaration.module; + let instance = module_declaration.instance.as_ref(); + let generics = &module_entry.generics; + if instance.is_some() && generics.params.is_empty() { + let msg = format!( + "Instantiable module with no generic `Origin` cannot \ + be constructed: module `{}` must have generic `Origin`", + module_declaration.name + ); + return Err(syn::Error::new(module_declaration.name.span(), msg)); } - None => {} + let index = module_declaration.index; + let tokens = quote!(#[codec(index = #index)] #module #instance #generics,); + modules_tokens.extend(tokens); } } let system_name = &system_module.module; - let system_index = system_module.index.to_string(); + let system_index = system_module.index; Ok(quote!( #scrate::impl_outer_origin! 
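Editor's aside: the new check in `complete_modules` records each pallet name in a map and raises a combined error when a name repeats. The core bookkeeping, extracted into plain Rust (positions replace `proc_macro2` spans, since `syn` is not needed to show the idea):

```rust
use std::collections::HashMap;

/// Mirror of the duplicate-name detection added to `complete_modules`:
/// `HashMap::insert` returns the previous value when a key repeats, which is
/// exactly the "already used" signal the macro turns into a compile error.
fn check_unique_names(names: &[&str]) -> Result<(), String> {
    let mut seen: HashMap<&str, usize> = HashMap::new();
    for (position, name) in names.iter().enumerate() {
        if let Some(first_use) = seen.insert(name, position) {
            return Err(format!(
                "Two modules with the same name! `{}` declared at positions {} and {}",
                name, first_use, position
            ));
        }
    }
    Ok(())
}

fn main() {
    assert!(check_unique_names(&["System", "Sudo", "Logger"]).is_ok());
    assert!(check_unique_names(&["System", "Sudo", "Sudo"]).is_err());
}
```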
{ @@ -403,25 +409,22 @@ fn decl_outer_event<'a>( ) -> syn::Result { let mut modules_tokens = TokenStream2::new(); for module_declaration in module_declarations { - match module_declaration.find_part("Event") { - Some(module_entry) => { - let module = &module_declaration.module; - let instance = module_declaration.instance.as_ref(); - let generics = &module_entry.generics; - if instance.is_some() && generics.params.len() == 0 { - let msg = format!( - "Instantiable module with no generic `Event` cannot \ - be constructed: module `{}` must have generic `Event`", - module_declaration.name, - ); - return Err(syn::Error::new(module_declaration.name.span(), msg)); - } - - let index = module_declaration.index.to_string(); - let tokens = quote!(#[codec(index = #index)] #module #instance #generics,); - modules_tokens.extend(tokens); + if let Some(module_entry) = module_declaration.find_part("Event") { + let module = &module_declaration.module; + let instance = module_declaration.instance.as_ref(); + let generics = &module_entry.generics; + if instance.is_some() && generics.params.is_empty() { + let msg = format!( + "Instantiable module with no generic `Event` cannot \ + be constructed: module `{}` must have generic `Event`", + module_declaration.name, + ); + return Err(syn::Error::new(module_declaration.name.span(), msg)); } - None => {} + + let index = module_declaration.index; + let tokens = quote!(#[codec(index = #index)] #module #instance #generics,); + modules_tokens.extend(tokens); } } @@ -467,8 +470,11 @@ fn decl_all_modules<'a>( quote!( #types - type AllModules = ( #all_modules ); - type AllModulesWithSystem = ( #all_modules_with_system ); + /// All pallets included in the runtime as a nested tuple of types. + /// Excludes the System pallet. + pub type AllModules = ( #all_modules ); + /// All pallets included in the runtime as a nested tuple of types. + pub type AllModulesWithSystem = ( #all_modules_with_system ); ) } diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 4a45044d67f25..6d4ba6cdbf743 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -333,7 +333,7 @@ impl Parse for ModulePart { impl ModulePart { pub fn format_names(names: &[&'static str]) -> String { - let res: Vec<_> = names.into_iter().map(|s| format!("`{}`", s)).collect(); + let res: Vec<_> = names.iter().map(|s| format!("`{}`", s)).collect(); res.join(", ") } diff --git a/frame/support/procedural/src/debug_no_bound.rs b/frame/support/procedural/src/debug_no_bound.rs index 2a818fb205fb8..7a5509cf986dc 100644 --- a/frame/support/procedural/src/debug_no_bound.rs +++ b/frame/support/procedural/src/debug_no_bound.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
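Editor's aside: `AllModules` and `AllModulesWithSystem` are now `pub`, so a runtime crate can hand the whole pallet tuple to anything implemented for tuples (the executive's hooks being the usual consumer). A toy illustration of why a tuple of pallet types is useful, using a hand-rolled hook trait instead of the frame ones:

```rust
/// Toy version of a runtime hook that frame implements for tuples of pallets.
trait OnRuntimeUpgrade {
    fn on_runtime_upgrade() -> u64; // returns consumed weight
}

struct PalletA;
struct PalletB;

impl OnRuntimeUpgrade for PalletA {
    fn on_runtime_upgrade() -> u64 { 10 }
}
impl OnRuntimeUpgrade for PalletB {
    fn on_runtime_upgrade() -> u64 { 32 }
}

// The tuple impl forwards to every member and sums the weights; this is the
// pattern a public `AllModules` / `AllModulesWithSystem` alias is consumed with.
impl<A: OnRuntimeUpgrade, B: OnRuntimeUpgrade> OnRuntimeUpgrade for (A, B) {
    fn on_runtime_upgrade() -> u64 {
        A::on_runtime_upgrade() + B::on_runtime_upgrade()
    }
}

type AllPallets = (PalletA, PalletB);

fn main() {
    assert_eq!(<AllPallets as OnRuntimeUpgrade>::on_runtime_upgrade(), 42);
}
```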
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index a6fb58846cbad..2c2cdf00a0453 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,12 +21,14 @@ mod storage; mod construct_runtime; +mod pallet; mod pallet_version; mod transactional; mod debug_no_bound; mod clone_no_bound; mod partial_eq_no_bound; +pub(crate) use storage::INHERENT_INSTANCE_NAME; use proc_macro::TokenStream; /// Declares strongly-typed wrappers around codec-compatible types in storage. @@ -35,7 +37,7 @@ use proc_macro::TokenStream; /// /// ```nocompile /// decl_storage! { -/// trait Store for Module as Example { +/// trait Store for Module as Example { /// Foo get(fn foo) config(): u32=12; /// Bar: map hasher(identity) u32 => u32; /// pub Zed build(|config| vec![(0, 0)]): map hasher(identity) u32 => u32; @@ -43,7 +45,7 @@ use proc_macro::TokenStream; /// } /// ``` /// -/// Declaration is set with the header `(pub) trait Store for Module as Example`, +/// Declaration is set with the header `(pub) trait Store for Module as Example`, /// with `Store` a (pub) trait generated associating each storage item to the `Module` and /// `as Example` setting the prefix used for storage items of this module. `Example` must be unique: /// another module with the same name and the same inner storage item name will conflict. @@ -169,7 +171,7 @@ use proc_macro::TokenStream; /// /// ```nocompile /// decl_storage! { -/// trait Store for Module as Example { +/// trait Store for Module as Example { /// /// // Your storage items /// } @@ -202,7 +204,7 @@ use proc_macro::TokenStream; /// (`DefaultInstance` type is optional): /// /// ```nocompile -/// trait Store for Module, I: Instance=DefaultInstance> as Example {} +/// trait Store for Module, I: Instance=DefaultInstance> as Example {} /// ``` /// /// Accessing the structure no requires the instance as generic parameter: @@ -214,7 +216,7 @@ use proc_macro::TokenStream; /// This macro supports a where clause which will be replicated to all generated types. /// /// ```nocompile -/// trait Store for Module as Example where T::AccountId: std::fmt::Display {} +/// trait Store for Module as Example where T::AccountId: std::fmt::Display {} /// ``` /// /// ## Limitations @@ -300,11 +302,22 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// The population of the genesis storage depends on the order of modules. So, if one of your /// modules depends on another module, the module that is depended upon needs to come before /// the module depending on it. +/// +/// # Type definitions +/// +/// * The macro generates a type alias for each pallet to their `Module` (or `Pallet`). +/// E.g. `type System = frame_system::Module` #[proc_macro] pub fn construct_runtime(input: TokenStream) -> TokenStream { construct_runtime::construct_runtime(input) } +/// Macro to define a pallet. Docs are at `frame_support::pallet`. +#[proc_macro_attribute] +pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream { + pallet::pallet(attr, item) +} + /// Execute the annotated function in a new storage transaction. 
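Editor's aside: this is where the new `#[pallet]` attribute macro is exposed from the procedural crate. Below is a rough sketch of the module shape it expects, based on the pallet API as published around this release; attribute names and details should be checked against the `frame_support::pallet` docs this macro comment points to, so treat everything here as an assumption rather than the canonical syntax.

```rust
// Sketch only: assumes the frame_support / frame_system crates of this release.
#[frame_support::pallet]
pub mod pallet {
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;
	use core::marker::PhantomData;

	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// The overarching event type.
		type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
	}

	#[pallet::pallet]
	pub struct Pallet<T>(PhantomData<T>);

	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}

	#[pallet::storage]
	pub type Something<T> = StorageValue<_, u32>;

	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// Something was stored. \[value, who\]
		SomethingStored(u32, T::AccountId),
	}

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		/// Store a value and emit an event.
		#[pallet::weight(10_000)]
		pub(super) fn do_something(origin: OriginFor<T>, value: u32) -> DispatchResultWithPostInfo {
			let who = ensure_signed(origin)?;
			Something::<T>::put(value);
			Self::deposit_event(Event::SomethingStored(value, who));
			Ok(().into())
		}
	}
}
```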
/// /// The return type of the annotated function must be `Result`. All changes to storage performed diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs new file mode 100644 index 0000000000000..830fd267dc9b0 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -0,0 +1,201 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; +use frame_support_procedural_tools::clean_type_string; +use syn::spanned::Spanned; + +/// * Generate enum call and implement various trait on it. +/// * Implement Callable and call_function on `Pallet` +pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { + let frame_support = &def.frame_support; + let frame_system = &def.frame_system; + let type_impl_gen = &def.type_impl_generics(def.call.attr_span); + let type_decl_bounded_gen = &def.type_decl_bounded_generics(def.call.attr_span); + let type_use_gen = &def.type_use_generics(def.call.attr_span); + let call_ident = syn::Ident::new("Call", def.call.attr_span); + let pallet_ident = &def.pallet_struct.pallet; + let where_clause = &def.call.where_clause; + + let fn_name = def.call.methods.iter().map(|method| &method.name).collect::>(); + + let fn_weight = def.call.methods.iter().map(|method| &method.weight); + + let fn_doc = def.call.methods.iter().map(|method| &method.docs).collect::>(); + + let args_name = def.call.methods.iter() + .map(|method| method.args.iter().map(|(_, name, _)| name.clone()).collect::>()) + .collect::>(); + + let args_type = def.call.methods.iter() + .map(|method| method.args.iter().map(|(_, _, type_)| type_.clone()).collect::>()) + .collect::>(); + + let args_compact_attr = def.call.methods.iter().map(|method| { + method.args.iter() + .map(|(is_compact, _, type_)| { + if *is_compact { + quote::quote_spanned!(type_.span() => #[codec(compact)] ) + } else { + quote::quote!() + } + }) + .collect::>() + }); + + let args_metadata_type = def.call.methods.iter().map(|method| { + method.args.iter() + .map(|(is_compact, _, type_)| { + let final_type = if *is_compact { + quote::quote_spanned!(type_.span() => Compact<#type_>) + } else { + quote::quote!(#type_) + }; + clean_type_string(&final_type.to_string()) + }) + .collect::>() + }); + + quote::quote_spanned!(def.call.attr_span => + #[derive( + #frame_support::RuntimeDebugNoBound, + #frame_support::CloneNoBound, + #frame_support::EqNoBound, + #frame_support::PartialEqNoBound, + #frame_support::codec::Encode, + #frame_support::codec::Decode, + )] + #[allow(non_camel_case_types)] + pub enum #call_ident<#type_decl_bounded_gen> #where_clause { + #[doc(hidden)] + #[codec(skip)] + __Ignore( + #frame_support::sp_std::marker::PhantomData<(#type_use_gen,)>, + #frame_support::Never, + ), + #( #fn_name( #( #args_compact_attr #args_type ),* ), )* + } + + impl<#type_impl_gen> 
#frame_support::dispatch::GetDispatchInfo + for #call_ident<#type_use_gen> + #where_clause + { + fn get_dispatch_info(&self) -> #frame_support::dispatch::DispatchInfo { + match *self { + #( + Self::#fn_name ( #( ref #args_name, )* ) => { + let base_weight = #fn_weight; + + let weight = < + dyn #frame_support::dispatch::WeighData<( #( & #args_type, )* )> + >::weigh_data(&base_weight, ( #( #args_name, )* )); + + let class = < + dyn #frame_support::dispatch::ClassifyDispatch< + ( #( & #args_type, )* ) + > + >::classify_dispatch(&base_weight, ( #( #args_name, )* )); + + let pays_fee = < + dyn #frame_support::dispatch::PaysFee<( #( & #args_type, )* )> + >::pays_fee(&base_weight, ( #( #args_name, )* )); + + #frame_support::dispatch::DispatchInfo { + weight, + class, + pays_fee, + } + }, + )* + Self::__Ignore(_, _) => unreachable!("__Ignore cannot be used"), + } + } + } + + impl<#type_impl_gen> #frame_support::dispatch::GetCallName for #call_ident<#type_use_gen> + #where_clause + { + fn get_call_name(&self) -> &'static str { + match *self { + #( Self::#fn_name(..) => stringify!(#fn_name), )* + Self::__Ignore(_, _) => unreachable!("__PhantomItem cannot be used."), + } + } + + fn get_call_names() -> &'static [&'static str] { + &[ #( stringify!(#fn_name), )* ] + } + } + + impl<#type_impl_gen> #frame_support::traits::UnfilteredDispatchable + for #call_ident<#type_use_gen> + #where_clause + { + type Origin = #frame_system::pallet_prelude::OriginFor; + fn dispatch_bypass_filter( + self, + origin: Self::Origin + ) -> #frame_support::dispatch::DispatchResultWithPostInfo { + match self { + #( + Self::#fn_name( #( #args_name, )* ) => + <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) + .map(Into::into).map_err(Into::into), + )* + Self::__Ignore(_, _) => { + let _ = origin; // Use origin for empty Call enum + unreachable!("__PhantomItem cannot be used."); + }, + } + } + } + + impl<#type_impl_gen> #frame_support::dispatch::Callable for #pallet_ident<#type_use_gen> + #where_clause + { + type Call = #call_ident<#type_use_gen>; + } + + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clause { + #[doc(hidden)] + pub fn call_functions() -> &'static [#frame_support::dispatch::FunctionMetadata] { + &[ #( + #frame_support::dispatch::FunctionMetadata { + name: #frame_support::dispatch::DecodeDifferent::Encode( + stringify!(#fn_name) + ), + arguments: #frame_support::dispatch::DecodeDifferent::Encode( + &[ #( + #frame_support::dispatch::FunctionArgumentMetadata { + name: #frame_support::dispatch::DecodeDifferent::Encode( + stringify!(#args_name) + ), + ty: #frame_support::dispatch::DecodeDifferent::Encode( + #args_metadata_type + ), + }, + )* ] + ), + documentation: #frame_support::dispatch::DecodeDifferent::Encode( + &[ #( #fn_doc ),* ] + ), + }, + )* ] + } + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/constants.rs b/frame/support/procedural/src/pallet/expand/constants.rs new file mode 100644 index 0000000000000..e5acf42270aa7 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/constants.rs @@ -0,0 +1,138 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; +use frame_support_procedural_tools::clean_type_string; +use quote::ToTokens; + +struct ConstDef { + /// Name of the associated type. + pub ident: syn::Ident, + /// The type in Get, e.g. `u32` in `type Foo: Get;`, but `Self` is replaced by `T` + pub type_: syn::Type, + /// The doc associated + pub doc: Vec, + /// default_byte implementation + pub default_byte_impl: proc_macro2::TokenStream, +} + +/// * Impl fn module_constant_metadata for pallet. +pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { + let frame_support = &def.frame_support; + let type_impl_gen = &def.type_impl_generics(proc_macro2::Span::call_site()); + let type_decl_gen = &def.type_decl_generics(proc_macro2::Span::call_site()); + let type_use_gen = &def.type_use_generics(proc_macro2::Span::call_site()); + let pallet_ident = &def.pallet_struct.pallet; + + let mut where_clauses = vec![&def.config.where_clause]; + where_clauses.extend(def.extra_constants.iter().map(|d| &d.where_clause)); + let completed_where_clause = super::merge_where_clauses(&where_clauses); + + let config_consts = def.config.consts_metadata.iter().map(|const_| { + let ident = &const_.ident; + let const_type = &const_.type_; + + ConstDef { + ident: const_.ident.clone(), + type_: const_.type_.clone(), + doc: const_.doc.clone(), + default_byte_impl: quote::quote!( + let value = >::get(); + #frame_support::codec::Encode::encode(&value) + ), + } + }); + + let extra_consts = def.extra_constants.iter().flat_map(|d| &d.extra_constants).map(|const_| { + let ident = &const_.ident; + + ConstDef { + ident: const_.ident.clone(), + type_: const_.type_.clone(), + doc: const_.doc.clone(), + default_byte_impl: quote::quote!( + let value = >::#ident(); + #frame_support::codec::Encode::encode(&value) + ), + } + }); + + let consts = config_consts.chain(extra_consts) + .map(|const_| { + let const_type = &const_.type_; + let const_type_str = clean_type_string(&const_type.to_token_stream().to_string()); + let ident = &const_.ident; + let ident_str = format!("{}", ident); + let doc = const_.doc.clone().into_iter(); + let default_byte_impl = &const_.default_byte_impl; + let default_byte_getter = syn::Ident::new( + &format!("{}DefaultByteGetter", ident), + ident.span() + ); + + quote::quote!({ + #[allow(non_upper_case_types)] + #[allow(non_camel_case_types)] + struct #default_byte_getter<#type_decl_gen>( + #frame_support::sp_std::marker::PhantomData<(#type_use_gen)> + ); + + impl<#type_impl_gen> #frame_support::dispatch::DefaultByte for + #default_byte_getter<#type_use_gen> + #completed_where_clause + { + fn default_byte(&self) -> #frame_support::sp_std::vec::Vec { + #default_byte_impl + } + } + + unsafe impl<#type_impl_gen> Send for #default_byte_getter<#type_use_gen> + #completed_where_clause + {} + unsafe impl<#type_impl_gen> Sync for #default_byte_getter<#type_use_gen> + #completed_where_clause + {} + + #frame_support::dispatch::ModuleConstantMetadata { + name: #frame_support::dispatch::DecodeDifferent::Encode(#ident_str), + ty: #frame_support::dispatch::DecodeDifferent::Encode(#const_type_str), + value: 
#frame_support::dispatch::DecodeDifferent::Encode( + #frame_support::dispatch::DefaultByteGetter( + &#default_byte_getter::<#type_use_gen>( + #frame_support::sp_std::marker::PhantomData + ) + ) + ), + documentation: #frame_support::dispatch::DecodeDifferent::Encode( + &[ #( #doc ),* ] + ), + } + }) + }); + + quote::quote!( + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause{ + + #[doc(hidden)] + pub fn module_constants_metadata() + -> &'static [#frame_support::dispatch::ModuleConstantMetadata] + { + &[ #( #consts ),* ] + } + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/error.rs b/frame/support/procedural/src/pallet/expand/error.rs new file mode 100644 index 0000000000000..c8c0a3c0c4d58 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/error.rs @@ -0,0 +1,140 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; + +/// * impl various trait on Error +/// * impl ModuleErrorMetadata for Error +pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { + let error = if let Some(error) = &def.error { + error + } else { + return Default::default() + }; + + let error_ident = &error.error; + let frame_support = &def.frame_support; + let frame_system = &def.frame_system; + let type_impl_gen = &def.type_impl_generics(error.attr_span); + let type_use_gen = &def.type_use_generics(error.attr_span); + let config_where_clause = &def.config.where_clause; + + let phantom_variant: syn::Variant = syn::parse_quote!( + #[doc(hidden)] + __Ignore( + #frame_support::sp_std::marker::PhantomData<(#type_use_gen)>, + #frame_support::Never, + ) + ); + + let as_u8_matches = error.variants.iter().enumerate() + .map(|(i, (variant, _))| { + quote::quote_spanned!(error.attr_span => Self::#variant => #i as u8,) + }); + + let as_str_matches = error.variants.iter() + .map(|(variant, _)| { + let variant_str = format!("{}", variant); + quote::quote_spanned!(error.attr_span => Self::#variant => #variant_str,) + }); + + let metadata = error.variants.iter() + .map(|(variant, doc)| { + let variant_str = format!("{}", variant); + quote::quote_spanned!(error.attr_span => + #frame_support::error::ErrorMetadata { + name: #frame_support::error::DecodeDifferent::Encode(#variant_str), + documentation: #frame_support::error::DecodeDifferent::Encode(&[ #( #doc, )* ]), + }, + ) + }); + + let error_item = { + let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[error.index]; + if let syn::Item::Enum(item) = item { + item + } else { + unreachable!("Checked by error parser") + } + }; + + error_item.variants.insert(0, phantom_variant); + + quote::quote_spanned!(error.attr_span => + impl<#type_impl_gen> #frame_support::sp_std::fmt::Debug for #error_ident<#type_use_gen> + #config_where_clause + { + fn fmt(&self, f: &mut #frame_support::sp_std::fmt::Formatter<'_>) + -> 
#frame_support::sp_std::fmt::Result + { + f.write_str(self.as_str()) + } + } + + impl<#type_impl_gen> #error_ident<#type_use_gen> #config_where_clause { + pub fn as_u8(&self) -> u8 { + match &self { + Self::__Ignore(_, _) => unreachable!("`__Ignore` can never be constructed"), + #( #as_u8_matches )* + } + } + + pub fn as_str(&self) -> &'static str { + match &self { + Self::__Ignore(_, _) => unreachable!("`__Ignore` can never be constructed"), + #( #as_str_matches )* + } + } + } + + impl<#type_impl_gen> From<#error_ident<#type_use_gen>> for &'static str + #config_where_clause + { + fn from(err: #error_ident<#type_use_gen>) -> &'static str { + err.as_str() + } + } + + impl<#type_impl_gen> From<#error_ident<#type_use_gen>> + for #frame_support::sp_runtime::DispatchError + #config_where_clause + { + fn from(err: #error_ident<#type_use_gen>) -> Self { + let index = < + ::PalletInfo + as #frame_support::traits::PalletInfo + >::index::>() + .expect("Every active module has an index in the runtime; qed") as u8; + + #frame_support::sp_runtime::DispatchError::Module { + index, + error: err.as_u8(), + message: Some(err.as_str()), + } + } + } + + impl<#type_impl_gen> #frame_support::error::ModuleErrorMetadata + for #error_ident<#type_use_gen> + #config_where_clause + { + fn metadata() -> &'static [#frame_support::error::ErrorMetadata] { + &[ #( #metadata )* ] + } + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs new file mode 100644 index 0000000000000..e04d64750bca4 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -0,0 +1,139 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; + +/// * Add __Ignore variant on Event +/// * Impl various trait on Event including metadata +/// * if deposit_event is defined, implement deposit_event on module. +pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { + let event = if let Some(event) = &def.event { + event + } else { + return Default::default() + }; + + let event_where_clause = &event.where_clause; + + // NOTE: actually event where clause must be a subset of config where clause because of + // `type Event: From>`. 
But we merge either way for potential better error message + let completed_where_clause = super::merge_where_clauses(&[ + &event.where_clause, + &def.config.where_clause, + ]); + + let event_ident = &event.event; + let frame_system = &def.frame_system; + let frame_support = &def.frame_support; + let event_use_gen = &event.gen_kind.type_use_gen(event.attr_span); + let event_impl_gen= &event.gen_kind.type_impl_gen(event.attr_span); + let metadata = event.metadata.iter() + .map(|(ident, args, docs)| { + let name = format!("{}", ident); + quote::quote_spanned!(event.attr_span => + #frame_support::event::EventMetadata { + name: #frame_support::event::DecodeDifferent::Encode(#name), + arguments: #frame_support::event::DecodeDifferent::Encode(&[ + #( #args, )* + ]), + documentation: #frame_support::event::DecodeDifferent::Encode(&[ + #( #docs, )* + ]), + }, + ) + }); + + let event_item = { + let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[event.index]; + if let syn::Item::Enum(item) = item { + item + } else { + unreachable!("Checked by event parser") + } + }; + + // Phantom data is added for generic event. + if event.gen_kind.is_generic() { + let variant = syn::parse_quote!( + #[doc(hidden)] + #[codec(skip)] + __Ignore( + #frame_support::sp_std::marker::PhantomData<(#event_use_gen)>, + #frame_support::Never, + ) + ); + + // Push ignore variant at the end. + event_item.variants.push(variant); + } + + // derive some traits because system event require Clone, FullCodec, Eq, PartialEq and Debug + event_item.attrs.push(syn::parse_quote!( + #[derive( + #frame_support::CloneNoBound, + #frame_support::EqNoBound, + #frame_support::PartialEqNoBound, + #frame_support::RuntimeDebugNoBound, + #frame_support::codec::Encode, + #frame_support::codec::Decode, + )] + )); + + + let deposit_event = if let Some((fn_vis, fn_span)) = &event.deposit_event { + let event_use_gen = &event.gen_kind.type_use_gen(event.attr_span); + let trait_use_gen = &def.trait_use_generics(event.attr_span); + let type_impl_gen = &def.type_impl_generics(event.attr_span); + let type_use_gen = &def.type_use_generics(event.attr_span); + + quote::quote_spanned!(*fn_span => + impl<#type_impl_gen> Pallet<#type_use_gen> #completed_where_clause { + #fn_vis fn deposit_event(event: Event<#event_use_gen>) { + let event = < + ::Event as + From> + >::from(event); + + let event = < + ::Event as + Into<::Event> + >::into(event); + + <#frame_system::Pallet>::deposit_event(event) + } + } + ) + } else { + Default::default() + }; + + quote::quote_spanned!(event.attr_span => + #deposit_event + + impl<#event_impl_gen> From<#event_ident<#event_use_gen>> for () #event_where_clause { + fn from(_: #event_ident<#event_use_gen>) -> () { () } + } + + impl<#event_impl_gen> #event_ident<#event_use_gen> #event_where_clause { + #[allow(dead_code)] + #[doc(hidden)] + pub fn metadata() -> &'static [#frame_support::event::EventMetadata] { + &[ #( #metadata )* ] + } + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/genesis_build.rs b/frame/support/procedural/src/pallet/expand/genesis_build.rs new file mode 100644 index 0000000000000..374d21001d6a1 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -0,0 +1,71 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; + +/// * implement the trait `sp_runtime::BuildModuleGenesisStorage` +/// * add #[cfg(features = "std")] to GenesisBuild implementation. +pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { + let genesis_config = if let Some(genesis_config) = &def.genesis_config { + genesis_config + } else { + return Default::default() + }; + let genesis_build = def.genesis_build.as_ref().expect("Checked by def parser"); + + let frame_support = &def.frame_support; + let type_impl_gen = &def.type_impl_generics(genesis_build.attr_span); + let type_use_gen = &def.type_use_generics(genesis_build.attr_span); + let trait_use_gen = if def.config.has_instance { + quote::quote_spanned!(genesis_build.attr_span => T, I) + } else { + // `__InherentHiddenInstance` used by construct_runtime here is alias for `()` + quote::quote_spanned!(genesis_build.attr_span => T, ()) + }; + let gen_cfg_ident = &genesis_config.genesis_config; + + let gen_cfg_use_gen = genesis_config.gen_kind.type_use_gen(genesis_build.attr_span); + + let genesis_build_item = &mut def.item.content.as_mut() + .expect("Checked by def parser").1[genesis_build.index]; + + let genesis_build_item_impl = if let syn::Item::Impl(impl_) = genesis_build_item { + impl_ + } else { + unreachable!("Checked by genesis_build parser") + }; + + genesis_build_item_impl.attrs.push(syn::parse_quote!( #[cfg(feature = "std")] )); + let where_clause = &genesis_build.where_clause; + + quote::quote_spanned!(genesis_build.attr_span => + #[cfg(feature = "std")] + impl<#type_impl_gen> #frame_support::sp_runtime::BuildModuleGenesisStorage<#trait_use_gen> + for #gen_cfg_ident<#gen_cfg_use_gen> #where_clause + { + fn build_module_genesis_storage( + &self, + storage: &mut #frame_support::sp_runtime::Storage, + ) -> std::result::Result<(), std::string::String> { + #frame_support::BasicExternalities::execute_with_storage(storage, || { + >::build(self); + Ok(()) + }) + } + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/genesis_config.rs b/frame/support/procedural/src/pallet/expand/genesis_config.rs new file mode 100644 index 0000000000000..1dade8f0144b9 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/genesis_config.rs @@ -0,0 +1,49 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; + +/// * add various derive trait on GenesisConfig struct. 
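///
/// For example, a user-declared genesis config such as the hypothetical item below ends up
/// gated behind the `std` feature and annotated with the serde derives/attributes added in the
/// function body:
///
/// ```nocompile
/// #[pallet::genesis_config]
/// pub struct GenesisConfig {
///     pub initial_members: Vec<u32>,
/// }
/// ```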
+pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { + let genesis_config = if let Some(genesis_config) = &def.genesis_config { + genesis_config + } else { + return Default::default() + }; + let frame_support = &def.frame_support; + + let genesis_config_item = &mut def.item.content.as_mut() + .expect("Checked by def parser").1[genesis_config.index]; + + match genesis_config_item { + syn::Item::Enum(syn::ItemEnum { attrs, ..}) | + syn::Item::Struct(syn::ItemStruct { attrs, .. }) | + syn::Item::Type(syn::ItemType { attrs, .. }) => { + attrs.push(syn::parse_quote!( #[cfg(feature = "std")] )); + attrs.push(syn::parse_quote!( + #[derive(#frame_support::Serialize, #frame_support::Deserialize)] + )); + attrs.push(syn::parse_quote!( #[serde(rename_all = "camelCase")] )); + attrs.push(syn::parse_quote!( #[serde(deny_unknown_fields)] )); + attrs.push(syn::parse_quote!( #[serde(bound(serialize = ""))] )); + attrs.push(syn::parse_quote!( #[serde(bound(deserialize = ""))] )); + }, + _ => unreachable!("Checked by genesis_config parser"), + } + + Default::default() +} diff --git a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs new file mode 100644 index 0000000000000..2e4fddebb7b07 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -0,0 +1,106 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::pallet::Def; + +/// * implement the individual traits using the Hooks trait +pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { + let frame_support = &def.frame_support; + let type_impl_gen = &def.type_impl_generics(def.hooks.attr_span); + let type_use_gen = &def.type_use_generics(def.hooks.attr_span); + let pallet_ident = &def.pallet_struct.pallet; + let where_clause = &def.hooks.where_clause; + let frame_system = &def.frame_system; + + quote::quote_spanned!(def.hooks.attr_span => + impl<#type_impl_gen> + #frame_support::traits::OnFinalize<::BlockNumber> + for #pallet_ident<#type_use_gen> #where_clause + { + fn on_finalize(n: ::BlockNumber) { + < + Self as #frame_support::traits::Hooks< + ::BlockNumber + > + >::on_finalize(n) + } + } + + impl<#type_impl_gen> + #frame_support::traits::OnInitialize<::BlockNumber> + for #pallet_ident<#type_use_gen> #where_clause + { + fn on_initialize( + n: ::BlockNumber + ) -> #frame_support::weights::Weight { + < + Self as #frame_support::traits::Hooks< + ::BlockNumber + > + >::on_initialize(n) + } + } + + impl<#type_impl_gen> + #frame_support::traits::OnRuntimeUpgrade + for #pallet_ident<#type_use_gen> #where_clause + { + fn on_runtime_upgrade() -> #frame_support::weights::Weight { + let result = < + Self as #frame_support::traits::Hooks< + ::BlockNumber + > + >::on_runtime_upgrade(); + + #frame_support::crate_to_pallet_version!() + .put_into_storage::<::PalletInfo, Self>(); + + let additional_write = < + ::DbWeight as #frame_support::traits::Get<_> + >::get().writes(1); + + result.saturating_add(additional_write) + } + } + + impl<#type_impl_gen> + #frame_support::traits::OffchainWorker<::BlockNumber> + for #pallet_ident<#type_use_gen> #where_clause + { + fn offchain_worker(n: ::BlockNumber) { + < + Self as #frame_support::traits::Hooks< + ::BlockNumber + > + >::offchain_worker(n) + } + } + + impl<#type_impl_gen> + #frame_support::traits::IntegrityTest + for #pallet_ident<#type_use_gen> #where_clause + { + fn integrity_test() { + < + Self as #frame_support::traits::Hooks< + ::BlockNumber + > + >::integrity_test() + } + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/instances.rs b/frame/support/procedural/src/pallet/expand/instances.rs new file mode 100644 index 0000000000000..c60cd5ebe8d81 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/instances.rs @@ -0,0 +1,40 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use proc_macro2::Span; +use crate::pallet::Def; + +/// * Provide inherent instance to be used by construct_runtime +/// * Provide Instance0 .. 
Instance16 for instantiable pallet +pub fn expand_instances(def: &mut Def) -> proc_macro2::TokenStream { + let frame_support = &def.frame_support; + let inherent_ident = syn::Ident::new(crate::INHERENT_INSTANCE_NAME, Span::call_site()); + let instances = if def.config.has_instance { + (0..16).map(|i| syn::Ident::new(&format!("Instance{}", i), Span::call_site())).collect() + } else { + vec![] + }; + + quote::quote!( + /// Hidden instance generated to be internally used when module is used without + /// instance. + #[doc(hidden)] + pub type #inherent_ident = (); + + #( pub use #frame_support::instances::#instances; )* + ) +} diff --git a/frame/support/procedural/src/pallet/expand/mod.rs b/frame/support/procedural/src/pallet/expand/mod.rs new file mode 100644 index 0000000000000..c2a81e9bbcd8f --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/mod.rs @@ -0,0 +1,81 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod constants; +mod pallet_struct; +mod call; +mod error; +mod event; +mod storage; +mod hooks; +mod store_trait; +mod instances; +mod genesis_build; +mod genesis_config; +mod type_value; + +use crate::pallet::Def; +use quote::ToTokens; + +/// Merge where clause together, `where` token span is taken from the first not none one. +pub fn merge_where_clauses(clauses: &[&Option]) -> Option { + let mut clauses = clauses.iter().filter_map(|f| f.as_ref()); + let mut res = clauses.next()?.clone(); + for other in clauses { + res.predicates.extend(other.predicates.iter().cloned()) + } + Some(res) +} + +/// Expand definition, in particular: +/// * add some bounds and variants to type defined, +/// * create some new types, +/// * impl stuff on them. 
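///
/// As a rough, non-normative sketch, the user input being expanded is a module of the form:
///
/// ```nocompile
/// #[frame_support::pallet]
/// pub mod pallet {
///     use frame_support::pallet_prelude::*;
///     use frame_system::pallet_prelude::*;
///
///     #[pallet::config]
///     pub trait Config: frame_system::Config { /* .. */ }
///
///     #[pallet::pallet]
///     #[pallet::generate_store(pub(super) trait Store)]
///     pub struct Pallet<T>(_);
///
///     #[pallet::hooks]
///     impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
///
///     #[pallet::call]
///     impl<T: Config> Pallet<T> { /* dispatchables */ }
/// }
/// ```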
+pub fn expand(mut def: Def) -> proc_macro2::TokenStream { + let constants = constants::expand_constants(&mut def); + let pallet_struct = pallet_struct::expand_pallet_struct(&mut def); + let call = call::expand_call(&mut def); + let error = error::expand_error(&mut def); + let event = event::expand_event(&mut def); + let storages = storage::expand_storages(&mut def); + let instances = instances::expand_instances(&mut def); + let store_trait = store_trait::expand_store_trait(&mut def); + let hooks = hooks::expand_hooks(&mut def); + let genesis_build = genesis_build::expand_genesis_build(&mut def); + let genesis_config = genesis_config::expand_genesis_config(&mut def); + let type_values = type_value::expand_type_values(&mut def); + + let new_items = quote::quote!( + #constants + #pallet_struct + #call + #error + #event + #storages + #instances + #store_trait + #hooks + #genesis_build + #genesis_config + #type_values + ); + + def.item.content.as_mut().expect("This is checked by parsing").1 + .push(syn::Item::Verbatim(new_items)); + + def.item.into_token_stream() +} diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs new file mode 100644 index 0000000000000..6e456695d9a45 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -0,0 +1,127 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::pallet::Def; + +/// * Add derive trait on Pallet +/// * Implement GetPalletVersion on Pallet +/// * Implement OnGenesis on Pallet +/// * Implement ModuleErrorMetadata on Pallet +/// * declare Module type alias for construct_runtime +/// * replace the first field type of `struct Pallet` with `PhantomData` if it is `_` +pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { + let frame_support = &def.frame_support; + let frame_system = &def.frame_system; + let type_impl_gen = &def.type_impl_generics(def.pallet_struct.attr_span); + let type_use_gen = &def.type_use_generics(def.pallet_struct.attr_span); + let type_decl_gen = &def.type_decl_generics(def.pallet_struct.attr_span); + let pallet_ident = &def.pallet_struct.pallet; + let config_where_clause = &def.config.where_clause; + + let pallet_item = { + let pallet_module_items = &mut def.item.content.as_mut().expect("Checked by def").1; + let item = &mut pallet_module_items[def.pallet_struct.index]; + if let syn::Item::Struct(item) = item { + item + } else { + unreachable!("Checked by pallet struct parser") + } + }; + + // If the first field type is `_` then we replace with `PhantomData` + if let Some(field) = pallet_item.fields.iter_mut().next() { + if field.ty == syn::parse_quote!(_) { + field.ty = syn::parse_quote!( + #frame_support::sp_std::marker::PhantomData<(#type_use_gen)> + ); + } + } + + pallet_item.attrs.push(syn::parse_quote!( + #[derive( + #frame_support::CloneNoBound, + #frame_support::EqNoBound, + #frame_support::PartialEqNoBound, + #frame_support::RuntimeDebugNoBound, + )] + )); + + let module_error_metadata = if let Some(error_def) = &def.error { + let error_ident = &error_def.error; + quote::quote_spanned!(def.pallet_struct.attr_span => + impl<#type_impl_gen> #frame_support::error::ModuleErrorMetadata + for #pallet_ident<#type_use_gen> + #config_where_clause + { + fn metadata() -> &'static [#frame_support::error::ErrorMetadata] { + < + #error_ident<#type_use_gen> as #frame_support::error::ModuleErrorMetadata + >::metadata() + } + } + ) + } else { + quote::quote_spanned!(def.pallet_struct.attr_span => + impl<#type_impl_gen> #frame_support::error::ModuleErrorMetadata + for #pallet_ident<#type_use_gen> + #config_where_clause + { + fn metadata() -> &'static [#frame_support::error::ErrorMetadata] { + &[] + } + } + ) + }; + + quote::quote_spanned!(def.pallet_struct.attr_span => + #module_error_metadata + + /// Type alias to `Pallet`, to be used by `construct_runtime`. + /// + /// Generated by `pallet` attribute macro. 
+ pub type Module<#type_decl_gen> = #pallet_ident<#type_use_gen>; + + // Implement `GetPalletVersion` for `Pallet` + impl<#type_impl_gen> #frame_support::traits::GetPalletVersion + for #pallet_ident<#type_use_gen> + #config_where_clause + { + fn current_version() -> #frame_support::traits::PalletVersion { + #frame_support::crate_to_pallet_version!() + } + + fn storage_version() -> Option<#frame_support::traits::PalletVersion> { + let key = #frame_support::traits::PalletVersion::storage_key::< + ::PalletInfo, Self + >().expect("Every active pallet has a name in the runtime; qed"); + + #frame_support::storage::unhashed::get(&key) + } + } + + // Implement `OnGenesis` for `Pallet` + impl<#type_impl_gen> #frame_support::traits::OnGenesis + for #pallet_ident<#type_use_gen> + #config_where_clause + { + fn on_genesis() { + #frame_support::crate_to_pallet_version!() + .put_into_storage::<::PalletInfo, Self>(); + } + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs new file mode 100644 index 0000000000000..7948fca2faf06 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -0,0 +1,286 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; +use crate::pallet::parse::storage::{Metadata, QueryKind}; +use frame_support_procedural_tools::clean_type_string; + +/// Generate the prefix_ident related the the storage. +/// prefix_ident is used for the prefix struct to be given to storage as first generic param. +fn prefix_ident(storage_ident: &syn::Ident) -> syn::Ident { + syn::Ident::new(&format!("_GeneratedPrefixForStorage{}", storage_ident), storage_ident.span()) +} + +/// * generate StoragePrefix structs (e.g. for a storage `MyStorage` a struct with the name +/// `_GeneratedPrefixForStorage$NameOfStorage` is generated) and implements StorageInstance trait. +/// * replace the first generic `_` by the generated prefix structure +/// * generate metadatas +pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { + let frame_support = &def.frame_support; + let frame_system = &def.frame_system; + let pallet_ident = &def.pallet_struct.pallet; + + // Replace first arg `_` by the generated prefix structure. 
+ // Add `#[allow(type_alias_bounds)]` + for storage_def in def.storages.iter_mut() { + let item = &mut def.item.content.as_mut().expect("Checked by def").1[storage_def.index]; + + let typ_item = if let syn::Item::Type(t) = item { + t + } else { + unreachable!("Checked by def"); + }; + + typ_item.attrs.push(syn::parse_quote!(#[allow(type_alias_bounds)])); + + let typ_path = if let syn::Type::Path(p) = &mut *typ_item.ty { + p + } else { + unreachable!("Checked by def"); + }; + + let args = if let syn::PathArguments::AngleBracketed(args) = + &mut typ_path.path.segments[0].arguments + { + args + } else { + unreachable!("Checked by def"); + }; + + let type_use_gen = if def.config.has_instance { + quote::quote_spanned!(storage_def.attr_span => T, I) + } else { + quote::quote_spanned!(storage_def.attr_span => T) + }; + let prefix_ident = prefix_ident(&storage_def.ident); + args.args[0] = syn::parse_quote!( #prefix_ident<#type_use_gen> ); + } + + let entries = def.storages.iter() + .map(|storage| { + let docs = &storage.docs; + + let ident = &storage.ident; + let gen = &def.type_use_generics(storage.attr_span); + let full_ident = quote::quote_spanned!(storage.attr_span => #ident<#gen> ); + + let metadata_trait = match &storage.metadata { + Metadata::Value { .. } => quote::quote_spanned!(storage.attr_span => + #frame_support::storage::types::StorageValueMetadata + ), + Metadata::Map { .. } => quote::quote_spanned!(storage.attr_span => + #frame_support::storage::types::StorageMapMetadata + ), + Metadata::DoubleMap { .. } => quote::quote_spanned!(storage.attr_span => + #frame_support::storage::types::StorageDoubleMapMetadata + ), + }; + + let ty = match &storage.metadata { + Metadata::Value { value } => { + let value = clean_type_string("e::quote!(#value).to_string()); + quote::quote_spanned!(storage.attr_span => + #frame_support::metadata::StorageEntryType::Plain( + #frame_support::metadata::DecodeDifferent::Encode(#value) + ) + ) + }, + Metadata::Map { key, value } => { + let value = clean_type_string("e::quote!(#value).to_string()); + let key = clean_type_string("e::quote!(#key).to_string()); + quote::quote_spanned!(storage.attr_span => + #frame_support::metadata::StorageEntryType::Map { + hasher: <#full_ident as #metadata_trait>::HASHER, + key: #frame_support::metadata::DecodeDifferent::Encode(#key), + value: #frame_support::metadata::DecodeDifferent::Encode(#value), + unused: false, + } + ) + }, + Metadata::DoubleMap { key1, key2, value } => { + let value = clean_type_string("e::quote!(#value).to_string()); + let key1 = clean_type_string("e::quote!(#key1).to_string()); + let key2 = clean_type_string("e::quote!(#key2).to_string()); + quote::quote_spanned!(storage.attr_span => + #frame_support::metadata::StorageEntryType::DoubleMap { + hasher: <#full_ident as #metadata_trait>::HASHER1, + key2_hasher: <#full_ident as #metadata_trait>::HASHER2, + key1: #frame_support::metadata::DecodeDifferent::Encode(#key1), + key2: #frame_support::metadata::DecodeDifferent::Encode(#key2), + value: #frame_support::metadata::DecodeDifferent::Encode(#value), + } + ) + } + }; + + quote::quote_spanned!(storage.attr_span => + #frame_support::metadata::StorageEntryMetadata { + name: #frame_support::metadata::DecodeDifferent::Encode( + <#full_ident as #metadata_trait>::NAME + ), + modifier: <#full_ident as #metadata_trait>::MODIFIER, + ty: #ty, + default: #frame_support::metadata::DecodeDifferent::Encode( + <#full_ident as #metadata_trait>::DEFAULT + ), + documentation: 
#frame_support::metadata::DecodeDifferent::Encode(&[ + #( #docs, )* + ]), + } + ) + }); + + let getters = def.storages.iter() + .map(|storage| if let Some(getter) = &storage.getter { + let completed_where_clause = super::merge_where_clauses(&[ + &storage.where_clause, + &def.config.where_clause, + ]); + let docs = storage.docs.iter() + .map(|d| quote::quote_spanned!(storage.attr_span => #[doc = #d])); + + let ident = &storage.ident; + let gen = &def.type_use_generics(storage.attr_span); + let type_impl_gen = &def.type_impl_generics(storage.attr_span); + let type_use_gen = &def.type_use_generics(storage.attr_span); + let full_ident = quote::quote_spanned!(storage.attr_span => #ident<#gen> ); + + match &storage.metadata { + Metadata::Value { value } => { + let query = match storage.query_kind.as_ref().expect("Checked by def") { + QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => + Option<#value> + ), + QueryKind::ValueQuery => quote::quote!(#value), + }; + quote::quote_spanned!(storage.attr_span => + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { + #( #docs )* + pub fn #getter() -> #query { + < + #full_ident as #frame_support::storage::StorageValue<#value> + >::get() + } + } + ) + }, + Metadata::Map { key, value } => { + let query = match storage.query_kind.as_ref().expect("Checked by def") { + QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => + Option<#value> + ), + QueryKind::ValueQuery => quote::quote!(#value), + }; + quote::quote_spanned!(storage.attr_span => + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { + #( #docs )* + pub fn #getter(k: KArg) -> #query where + KArg: #frame_support::codec::EncodeLike<#key>, + { + < + #full_ident as #frame_support::storage::StorageMap<#key, #value> + >::get(k) + } + } + ) + }, + Metadata::DoubleMap { key1, key2, value } => { + let query = match storage.query_kind.as_ref().expect("Checked by def") { + QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => + Option<#value> + ), + QueryKind::ValueQuery => quote::quote!(#value), + }; + quote::quote_spanned!(storage.attr_span => + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { + #( #docs )* + pub fn #getter(k1: KArg1, k2: KArg2) -> #query where + KArg1: #frame_support::codec::EncodeLike<#key1>, + KArg2: #frame_support::codec::EncodeLike<#key2>, + { + < + #full_ident as + #frame_support::storage::StorageDoubleMap<#key1, #key2, #value> + >::get(k1, k2) + } + } + ) + }, + } + } else { + Default::default() + }); + + let prefix_structs = def.storages.iter().map(|storage_def| { + let type_impl_gen = &def.type_impl_generics(storage_def.attr_span); + let type_use_gen = &def.type_use_generics(storage_def.attr_span); + let prefix_struct_ident = prefix_ident(&storage_def.ident); + let prefix_struct_vis = &storage_def.vis; + let prefix_struct_const = storage_def.ident.to_string(); + let config_where_clause = &def.config.where_clause; + + quote::quote_spanned!(storage_def.attr_span => + #prefix_struct_vis struct #prefix_struct_ident<#type_use_gen>( + core::marker::PhantomData<(#type_use_gen,)> + ); + impl<#type_impl_gen> #frame_support::traits::StorageInstance + for #prefix_struct_ident<#type_use_gen> + #config_where_clause + { + fn pallet_prefix() -> &'static str { + < + ::PalletInfo + as #frame_support::traits::PalletInfo + >::name::>() + .expect("Every active pallet has a name in the runtime; qed") + } + const STORAGE_PREFIX: &'static str = #prefix_struct_const; + } + ) + }); + + 
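// As a rough illustration, a storage item named `Foo` gets a companion prefix struct along the
// lines of `pub struct _GeneratedPrefixForStorageFoo<T>(core::marker::PhantomData<(T,)>);`,
// which the block above also wires up as a `StorageInstance` whose `STORAGE_PREFIX` is "Foo"
// and whose `pallet_prefix()` is the pallet name looked up via `PalletInfo`.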
let mut where_clauses = vec![&def.config.where_clause]; + where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); + let completed_where_clause = super::merge_where_clauses(&where_clauses); + let type_impl_gen = &def.type_impl_generics(proc_macro2::Span::call_site()); + let type_use_gen = &def.type_use_generics(proc_macro2::Span::call_site()); + + quote::quote!( + impl<#type_impl_gen> #pallet_ident<#type_use_gen> + #completed_where_clause + { + #[doc(hidden)] + pub fn storage_metadata() -> #frame_support::metadata::StorageMetadata { + #frame_support::metadata::StorageMetadata { + prefix: #frame_support::metadata::DecodeDifferent::Encode( + < + ::PalletInfo as + #frame_support::traits::PalletInfo + >::name::<#pallet_ident<#type_use_gen>>() + .expect("Every active pallet has a name in the runtime; qed") + ), + entries: #frame_support::metadata::DecodeDifferent::Encode( + &[ #( #entries, )* ] + ), + } + } + } + + #( #getters )* + #( #prefix_structs )* + ) +} diff --git a/frame/support/procedural/src/pallet/expand/store_trait.rs b/frame/support/procedural/src/pallet/expand/store_trait.rs new file mode 100644 index 0000000000000..cdc7e2837245f --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/store_trait.rs @@ -0,0 +1,55 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; +use syn::spanned::Spanned; + +/// If attribute `#[pallet::generate_store(..)]` is defined then: +/// * generate Store trait with all storages, +/// * implement Store trait for Pallet. +pub fn expand_store_trait(def: &mut Def) -> proc_macro2::TokenStream { + let (trait_vis, trait_store) = if let Some(store) = &def.pallet_struct.store { + store + } else { + return Default::default() + }; + + let type_impl_gen = &def.type_impl_generics(trait_store.span()); + let type_use_gen = &def.type_use_generics(trait_store.span()); + let pallet_ident = &def.pallet_struct.pallet; + + let mut where_clauses = vec![&def.config.where_clause]; + where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); + let completed_where_clause = super::merge_where_clauses(&where_clauses); + + let storage_names = &def.storages.iter().map(|storage| &storage.ident).collect::>(); + + quote::quote_spanned!(trait_store.span() => + #trait_vis trait #trait_store { + #( + type #storage_names; + )* + } + impl<#type_impl_gen> #trait_store for #pallet_ident<#type_use_gen> + #completed_where_clause + { + #( + type #storage_names = #storage_names<#type_use_gen>; + )* + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/type_value.rs b/frame/support/procedural/src/pallet/expand/type_value.rs new file mode 100644 index 0000000000000..b1b94eb4fbe6b --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/type_value.rs @@ -0,0 +1,73 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; + +/// * Generate the struct +/// * implement the `Get<..>` on it +/// * Rename the name of the function to internal name +pub fn expand_type_values(def: &mut Def) -> proc_macro2::TokenStream { + let mut expand = quote::quote!(); + let frame_support = &def.frame_support; + + for type_value in &def.type_values { + let fn_name_str = &type_value.ident.to_string(); + let fn_name_snakecase = inflector::cases::snakecase::to_snake_case(fn_name_str); + let fn_ident_renamed = syn::Ident::new( + &format!("__type_value_for_{}", fn_name_snakecase), + type_value.ident.span(), + ); + + let type_value_item = { + let item = &mut def.item.content.as_mut().expect("Checked by def").1[type_value.index]; + if let syn::Item::Fn(item) = item { + item + } else { + unreachable!("Checked by error parser") + } + }; + + // Rename the type_value function name + type_value_item.sig.ident = fn_ident_renamed.clone(); + + let vis = &type_value.vis; + let ident = &type_value.ident; + let type_ = &type_value.type_; + let where_clause = &type_value.where_clause; + + let (struct_impl_gen, struct_use_gen) = if type_value.is_generic { + ( + def.type_impl_generics(type_value.attr_span), + def.type_use_generics(type_value.attr_span), + ) + } else { + (Default::default(), Default::default()) + }; + + expand.extend(quote::quote_spanned!(type_value.attr_span => + #vis struct #ident<#struct_use_gen>(core::marker::PhantomData<((), #struct_use_gen)>); + impl<#struct_impl_gen> #frame_support::traits::Get<#type_> for #ident<#struct_use_gen> + #where_clause + { + fn get() -> #type_ { + #fn_ident_renamed::<#struct_use_gen>() + } + } + )); + } + expand +} diff --git a/frame/support/procedural/src/pallet/mod.rs b/frame/support/procedural/src/pallet/mod.rs new file mode 100644 index 0000000000000..560d57d50e037 --- /dev/null +++ b/frame/support/procedural/src/pallet/mod.rs @@ -0,0 +1,50 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation for pallet attribute macro. +//! +//! General workflow: +//! 1 - parse all pallet attributes: +//! This step remove all attributes `#[pallet::*]` from the ItemMod and build the `Def` struct +//! which holds the ItemMod without `#[pallet::*]` and information given by those attributes +//! 2 - expand from the parsed information +//! 
This step will modify the ItemMod by adding some derive attributes or phantom data variants +//! to user defined types. And also crate new types and implement block. + +mod parse; +mod expand; + +pub use parse::Def; +use syn::spanned::Spanned; + +pub fn pallet( + attr: proc_macro::TokenStream, + item: proc_macro::TokenStream +) -> proc_macro::TokenStream { + if !attr.is_empty() { + let msg = "Invalid pallet macro call: expected no attributes, e.g. macro call must be just \ + `#[frame_support::pallet]` or `#[pallet]`"; + let span = proc_macro2::TokenStream::from(attr).span(); + return syn::Error::new(span, msg).to_compile_error().into(); + } + + let item = syn::parse_macro_input!(item as syn::ItemMod); + match parse::Def::try_from(item) { + Ok(def) => expand::expand(def).into(), + Err(e) => e.to_compile_error().into(), + } +} diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs new file mode 100644 index 0000000000000..880cf54f8b2c9 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -0,0 +1,234 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use quote::ToTokens; +use syn::spanned::Spanned; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(DispatchResultWithPostInfo); + syn::custom_keyword!(Call); + syn::custom_keyword!(OriginFor); + syn::custom_keyword!(weight); + syn::custom_keyword!(compact); + syn::custom_keyword!(T); + syn::custom_keyword!(pallet); +} + +/// Definition of dispatchables typically `impl Pallet { ... }` +pub struct CallDef { + /// The where_clause used. + pub where_clause: Option, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The index of call item in pallet module. + pub index: usize, + /// Information on methods (used for expansion). + pub methods: Vec, + /// The span of the pallet::call attribute. + pub attr_span: proc_macro2::Span, +} + +/// Definition of dispatchable typically: `#[weight...] fn foo(origin .., param1: ...) -> ..` +pub struct CallVariantDef { + /// Function name. + pub name: syn::Ident, + /// Information on args: `(is_compact, name, type)` + pub args: Vec<(bool, syn::Ident, Box)>, + /// Weight formula. + pub weight: syn::Expr, + /// Docs, used for metadata. + pub docs: Vec, +} + +/// Attributes for functions in call impl block. 
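/// In user code these appear on each dispatchable of the `#[pallet::call]` impl block, e.g.
/// (hypothetical pallet, sketch only):
///
/// ```nocompile
/// #[pallet::weight(10_000)]
/// fn transfer(
///     origin: OriginFor<T>,
///     #[pallet::compact] amount: u32,
/// ) -> DispatchResultWithPostInfo { /* .. */ }
/// ```
///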
+/// Parse for `#[pallet::weight(expr)]` +pub struct FunctionAttr { + weight: syn::Expr, +} + +impl syn::parse::Parse for FunctionAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + content.parse::()?; + + let weight_content; + syn::parenthesized!(weight_content in content); + Ok(FunctionAttr { + weight: weight_content.parse::()?, + }) + } +} + +/// Attribute for arguments in function in call impl block. +/// Parse for `#[pallet::compact]| +pub struct ArgAttrIsCompact; + +impl syn::parse::Parse for ArgAttrIsCompact { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + content.parse::()?; + Ok(ArgAttrIsCompact) + } +} + +/// Check the syntax is `OriginFor` +pub fn check_dispatchable_first_arg_type(ty: &syn::Type) -> syn::Result<()> { + + pub struct CheckDispatchableFirstArg; + impl syn::parse::Parse for CheckDispatchableFirstArg { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + + Ok(Self) + } + } + + syn::parse2::(ty.to_token_stream()) + .map_err(|e| { + let msg = "Invalid type: expected `OriginFor`"; + let mut err = syn::Error::new(ty.span(), msg); + err.combine(e); + err + })?; + + Ok(()) +} + +impl CallDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item + ) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")); + }; + + let mut instances = vec![]; + instances.push(helper::check_impl_gen(&item.generics, item.impl_token.span())?); + instances.push(helper::check_pallet_struct_usage(&item.self_ty)?); + + if let Some((_, _, for_)) = item.trait_ { + let msg = "Invalid pallet::call, expected no trait ident as in \ + `impl<..> Pallet<..> { .. }`"; + return Err(syn::Error::new(for_.span(), msg)) + } + + let mut methods = vec![]; + for impl_item in &mut item.items { + if let syn::ImplItem::Method(method) = impl_item { + match method.sig.inputs.first() { + None => { + let msg = "Invalid pallet::call, must have at least origin arg"; + return Err(syn::Error::new(method.sig.span(), msg)); + }, + Some(syn::FnArg::Receiver(_)) => { + let msg = "Invalid pallet::call, first argument must be a typed argument, \ + e.g. `origin: OriginFor`"; + return Err(syn::Error::new(method.sig.span(), msg)); + }, + Some(syn::FnArg::Typed(arg)) => { + check_dispatchable_first_arg_type(&*arg.ty)?; + }, + } + + if let syn::ReturnType::Type(_, type_) = &method.sig.output { + syn::parse2::(type_.to_token_stream())?; + } else { + let msg = "Invalid pallet::call, require return type \ + DispatchResultWithPostInfo"; + return Err(syn::Error::new(method.sig.span(), msg)); + } + + let mut call_var_attrs: Vec = + helper::take_item_attrs(&mut method.attrs)?; + + if call_var_attrs.len() != 1 { + let msg = if call_var_attrs.is_empty() { + "Invalid pallet::call, requires weight attribute i.e. 
`#[pallet::weight($expr)]`" + } else { + "Invalid pallet::call, too many weight attributes given" + }; + return Err(syn::Error::new(method.sig.span(), msg)); + } + let weight = call_var_attrs.pop().unwrap().weight; + + let mut args = vec![]; + for arg in method.sig.inputs.iter_mut().skip(1) { + let arg = if let syn::FnArg::Typed(arg) = arg { + arg + } else { + unreachable!("Only first argument can be receiver"); + }; + + let arg_attrs: Vec = + helper::take_item_attrs(&mut arg.attrs)?; + + if arg_attrs.len() > 1 { + let msg = "Invalid pallet::call, argument has too many attributes"; + return Err(syn::Error::new(arg.span(), msg)); + } + + let arg_ident = if let syn::Pat::Ident(pat) = &*arg.pat { + pat.ident.clone() + } else { + let msg = "Invalid pallet::call, argument must be ident"; + return Err(syn::Error::new(arg.pat.span(), msg)); + }; + + args.push((!arg_attrs.is_empty(), arg_ident, arg.ty.clone())); + } + + let docs = helper::get_doc_literals(&method.attrs); + + methods.push(CallVariantDef { + name: method.sig.ident.clone(), + weight, + args, + docs, + }); + } else { + let msg = "Invalid pallet::call, only method accepted"; + return Err(syn::Error::new(impl_item.span(), msg)); + } + } + + Ok(Self { + index, + attr_span, + instances, + methods, + where_clause: item.generics.where_clause.clone(), + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs new file mode 100644 index 0000000000000..44525164f03d5 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -0,0 +1,387 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; +use quote::ToTokens; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(Config); + syn::custom_keyword!(From); + syn::custom_keyword!(T); + syn::custom_keyword!(I); + syn::custom_keyword!(Get); + syn::custom_keyword!(config); + syn::custom_keyword!(IsType); + syn::custom_keyword!(Event); + syn::custom_keyword!(constant); + syn::custom_keyword!(frame_system); + syn::custom_keyword!(disable_frame_system_supertrait_check); +} + +/// Input definition for the pallet config. +pub struct ConfigDef { + /// The index of item in pallet module. + pub index: usize, + /// Whether the trait has instance (i.e. define with `Config`) + pub has_instance: bool, + /// Const associated type. + pub consts_metadata: Vec, + /// Whether the trait has the associated type `Event`, note that those bounds are checked: + /// * `IsType::Event` + /// * `From` or `From>` or `From>` + pub has_event_type: bool, + /// The where clause on trait definition but modified so `Self` is `T`. + pub where_clause: Option, + /// The span of the pallet::config attribute. + pub attr_span: proc_macro2::Span, +} + +/// Input definition for a constant in pallet config. 
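///
/// Typically parsed from an associated type of the config trait written as
/// (hypothetical name, sketch only):
///
/// ```nocompile
/// #[pallet::constant]
/// type MaxMembers: Get<u32>;
/// ```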
+pub struct ConstMetadataDef { + /// Name of the associated type. + pub ident: syn::Ident, + /// The type in Get, e.g. `u32` in `type Foo: Get;`, but `Self` is replaced by `T` + pub type_: syn::Type, + /// The doc associated + pub doc: Vec, +} + +impl syn::parse::Parse for ConstMetadataDef { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let doc = helper::get_doc_literals(&syn::Attribute::parse_outer(input)?); + input.parse::()?; + let ident = input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + let mut type_ = input.parse::()?; + type_ = syn::parse2::(replace_self_by_t(type_.to_token_stream())) + .expect("Internal error: replacing `Self` by `T` should result in valid type"); + input.parse::]>()?; + input.parse::()?; + + Ok(Self { ident, type_, doc }) + } +} + +/// Parse for `#[pallet::disable_frame_system_supertrait_check]` +pub struct DisableFrameSystemSupertraitCheck; + +impl syn::parse::Parse for DisableFrameSystemSupertraitCheck { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + content.parse::()?; + Ok(Self) + } +} + +/// Parse for `#[pallet::constant]` +pub struct TypeAttrConst(proc_macro2::Span); + +impl Spanned for TypeAttrConst { + fn span(&self) -> proc_macro2::Span { + self.0 + } +} + +impl syn::parse::Parse for TypeAttrConst { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + Ok(TypeAttrConst(content.parse::()?.span())) + } +} + +/// Parse for `$ident::Config` +pub struct ConfigBoundParse(syn::Ident); + +impl syn::parse::Parse for ConfigBoundParse { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let ident = input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self(ident)) + } +} + +/// Parse for `IsType<::Event>` and retrieve `$ident` +pub struct IsTypeBoundEventParse(syn::Ident); + +impl syn::parse::Parse for IsTypeBoundEventParse { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + let ident = input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + + Ok(Self(ident)) + } +} + +/// Parse for `From` or `From>` or `From>` +pub struct FromEventParse { + is_generic: bool, + has_instance: bool, +} + +impl syn::parse::Parse for FromEventParse { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut is_generic = false; + let mut has_instance = false; + + input.parse::()?; + input.parse::()?; + input.parse::()?; + if input.peek(syn::Token![<]) { + is_generic = true; + input.parse::()?; + input.parse::()?; + if input.peek(syn::Token![,]) { + input.parse::()?; + input.parse::()?; + has_instance = true; + } + input.parse::]>()?; + } + input.parse::]>()?; + + Ok(Self { is_generic, has_instance }) + } +} + +/// Check if trait_item is `type Event`, if so checks its bounds are those expected. 
+/// (Event type is reserved type) +fn check_event_type( + frame_system: &syn::Ident, + trait_item: &syn::TraitItem, + trait_has_instance: bool +) -> syn::Result { + if let syn::TraitItem::Type(type_) = trait_item { + if type_.ident == "Event" { + // Check event has no generics + if !type_.generics.params.is_empty() || type_.generics.where_clause.is_some() { + let msg = "Invalid `type Event`, associated type `Event` is reserved and must have\ + no generics nor where_clause"; + return Err(syn::Error::new(trait_item.span(), msg)); + } + // Check bound contains IsType and From + + let has_is_type_bound = type_.bounds.iter().any(|s| { + syn::parse2::(s.to_token_stream()) + .map_or(false, |b| b.0 == *frame_system) + }); + + if !has_is_type_bound { + let msg = format!( + "Invalid `type Event`, associated type `Event` is reserved and must \ + bound: `IsType<::Event>`", + frame_system, + ); + return Err(syn::Error::new(type_.span(), msg)); + } + + let from_event_bound = type_.bounds.iter().find_map(|s| { + syn::parse2::(s.to_token_stream()).ok() + }); + + let from_event_bound = if let Some(b) = from_event_bound { + b + } else { + let msg = "Invalid `type Event`, associated type `Event` is reserved and must \ + bound: `From` or `From>` or `From>`"; + return Err(syn::Error::new(type_.span(), msg)); + }; + + if from_event_bound.is_generic + && (from_event_bound.has_instance != trait_has_instance) + { + let msg = "Invalid `type Event`, associated type `Event` bounds inconsistent \ + `From`. Config and generic Event must be both with instance or \ + without instance"; + return Err(syn::Error::new(type_.span(), msg)); + } + + Ok(true) + } else { + Ok(false) + } + } else { + Ok(false) + } +} + +/// Replace ident `Self` by `T` +pub fn replace_self_by_t(input: proc_macro2::TokenStream) -> proc_macro2::TokenStream { + input.into_iter() + .map(|token_tree| match token_tree { + proc_macro2::TokenTree::Group(group) => + proc_macro2::Group::new( + group.delimiter(), + replace_self_by_t(group.stream()) + ).into(), + proc_macro2::TokenTree::Ident(ident) if ident == "Self" => + proc_macro2::Ident::new("T", ident.span()).into(), + other => other + }) + .collect() +} + +impl ConfigDef { + pub fn try_from( + frame_system: &syn::Ident, + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Trait(item) = item { + item + } else { + let msg = "Invalid pallet::config, expected trait definition"; + return Err(syn::Error::new(item.span(), msg)); + }; + + if !matches!(item.vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::config, trait must be public"; + return Err(syn::Error::new(item.span(), msg)); + } + + syn::parse2::(item.ident.to_token_stream())?; + + + let where_clause = { + let stream = replace_self_by_t(item.generics.where_clause.to_token_stream()); + syn::parse2::>(stream) + .expect("Internal error: replacing `Self` by `T` should result in valid where + clause") + }; + + if item.generics.params.len() > 1 { + let msg = "Invalid pallet::config, expected no more than one generic"; + return Err(syn::Error::new(item.generics.params[2].span(), msg)); + } + + let has_instance = if item.generics.params.first().is_some() { + helper::check_config_def_gen(&item.generics, item.ident.span())?; + true + } else { + false + }; + + let mut has_event_type = false; + let mut consts_metadata = vec![]; + for trait_item in &mut item.items { + // Parse for event + has_event_type = has_event_type + || check_event_type(frame_system, trait_item, 
has_instance)?; + + // Parse for constant + let type_attrs_const: Vec = helper::take_item_attrs(trait_item)?; + + if type_attrs_const.len() > 1 { + let msg = "Invalid attribute in pallet::config, only one attribute is expected"; + return Err(syn::Error::new(type_attrs_const[1].span(), msg)); + } + + if type_attrs_const.len() == 1 { + match trait_item { + syn::TraitItem::Type(type_) => { + let constant = syn::parse2::(type_.to_token_stream()) + .map_err(|e| { + let error_msg = "Invalid usage of `#[pallet::constant]`, syntax \ + must be `type $SomeIdent: Get<$SomeType>;`"; + let mut err = syn::Error::new(type_.span(), error_msg); + err.combine(e); + err + })?; + + consts_metadata.push(constant); + }, + _ => { + let msg = "Invalid pallet::constant in pallet::config, expected type trait \ + item"; + return Err(syn::Error::new(trait_item.span(), msg)); + }, + } + } + } + + let attr: Option = helper::take_first_item_attr( + &mut item.attrs + )?; + + let disable_system_supertrait_check = attr.is_some(); + + let has_frame_system_supertrait = item.supertraits.iter().any(|s| { + syn::parse2::(s.to_token_stream()) + .map_or(false, |b| b.0 == *frame_system) + }); + + if !has_frame_system_supertrait && !disable_system_supertrait_check { + let found = if item.supertraits.is_empty() { + "none".to_string() + } else { + let mut found = item.supertraits.iter() + .fold(String::new(), |acc, s| { + format!("{}`{}`, ", acc, quote::quote!(#s).to_string()) + }); + found.pop(); + found.pop(); + found + }; + + let msg = format!( + "Invalid pallet::trait, expected explicit `{}::Config` as supertrait, \ + found {}. \ + (try `pub trait Config: frame_system::Config {{ ...` or \ + `pub trait Config: frame_system::Config {{ ...`). \ + To disable this check, use `#[pallet::disable_frame_system_supertrait_check]`", + frame_system, + found, + ); + return Err(syn::Error::new(item.span(), msg)); + } + + Ok(Self { + index, + has_instance, + consts_metadata, + has_event_type, + where_clause, + attr_span, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/error.rs b/frame/support/procedural/src/pallet/parse/error.rs new file mode 100644 index 0000000000000..49aaebc87f428 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/error.rs @@ -0,0 +1,93 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; +use quote::ToTokens; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(Error); +} + +/// This checks error declaration as a enum declaration with only variants without fields nor +/// discriminant. +pub struct ErrorDef { + /// The index of error item in pallet module. + pub index: usize, + /// Variants ident and doc literals (ordered as declaration order) + pub variants: Vec<(syn::Ident, Vec)>, + /// A set of usage of instance, must be check for consistency with trait. 
+ pub instances: Vec, + /// The keyword error used (contains span). + pub error: keyword::Error, + /// The span of the pallet::error attribute. + pub attr_span: proc_macro2::Span, +} + +impl ErrorDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Enum(item) = item { + item + } else { + return Err(syn::Error::new(item.span(), "Invalid pallet::error, expected item enum")); + }; + if !matches!(item.vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::error, `Error` must be public"; + return Err(syn::Error::new(item.span(), msg)); + } + + let mut instances = vec![]; + instances.push(helper::check_type_def_gen_no_bounds(&item.generics, item.ident.span())?); + + if item.generics.where_clause.is_some() { + let msg = "Invalid pallet::error, where clause is not allowed on pallet error item"; + return Err(syn::Error::new(item.generics.where_clause.as_ref().unwrap().span(), msg)); + } + + let error = syn::parse2::(item.ident.to_token_stream())?; + + let variants = item.variants.iter() + .map(|variant| { + if !matches!(variant.fields, syn::Fields::Unit) { + let msg = "Invalid pallet::error, unexpected fields, must be `Unit`"; + return Err(syn::Error::new(variant.fields.span(), msg)); + } + if variant.discriminant.is_some() { + let msg = "Invalid pallet::error, unexpected discriminant, discriminant \ + are not supported"; + let span = variant.discriminant.as_ref().unwrap().0.span(); + return Err(syn::Error::new(span, msg)); + } + + Ok((variant.ident.clone(), helper::get_doc_literals(&variant.attrs))) + }) + .collect::>()?; + + Ok(ErrorDef { + attr_span, + index, + variants, + instances, + error, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/event.rs b/frame/support/procedural/src/pallet/parse/event.rs new file mode 100644 index 0000000000000..7d8b7d075ef23 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/event.rs @@ -0,0 +1,227 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; +use quote::ToTokens; +use frame_support_procedural_tools::clean_type_string; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(metadata); + syn::custom_keyword!(Event); + syn::custom_keyword!(pallet); + syn::custom_keyword!(generate_deposit); + syn::custom_keyword!(deposit_event); +} + +/// Definition for pallet event enum. +pub struct EventDef { + /// The index of event item in pallet module. + pub index: usize, + /// The keyword Event used (contains span). + pub event: keyword::Event, + /// Event metadatas: `(name, args, docs)`. + pub metadata: Vec<(syn::Ident, Vec, Vec)>, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The kind of generic the type `Event` has. 
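+	///
+	/// For instance (illustrative declarations only): `pub enum Event { .. }` is not generic,
+	/// `pub enum Event<T: Config> { .. }` is generic over config only, and
+	/// `pub enum Event<T: Config<I>, I: 'static = ()> { .. }` is generic over both config and
+	/// instance.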
+ pub gen_kind: super::GenericKind, + /// Whether the function `deposit_event` must be generated. + pub deposit_event: Option<(syn::Visibility, proc_macro2::Span)>, + /// Where clause used in event definition. + pub where_clause: Option, + /// The span of the pallet::event attribute. + pub attr_span: proc_macro2::Span, +} + +/// Attribute for Event: defines metadata name to use. +/// +/// Syntax is: +/// * `#[pallet::metadata(SomeType = MetadataName, ...)]` +/// * `#[pallet::generate_deposit($vis fn deposit_event)]` +enum PalletEventAttr { + Metadata { + metadata: Vec<(syn::Type, String)>, + // Span of the attribute + span: proc_macro2::Span, + }, + DepositEvent { + fn_vis: syn::Visibility, + // Span for the keyword deposit_event + fn_span: proc_macro2::Span, + // Span of the attribute + span: proc_macro2::Span, + }, +} + +impl PalletEventAttr { + fn span(&self) -> proc_macro2::Span { + match self { + Self::Metadata { span, .. } => *span, + Self::DepositEvent { span, .. } => *span, + } + } +} + +/// Parse for syntax `$Type = "$SomeString"`. +fn parse_event_metadata_element( + input: syn::parse::ParseStream +) -> syn::Result<(syn::Type, String)> { + let typ = input.parse::()?; + input.parse::()?; + let ident = input.parse::()?; + Ok((typ, ident.value())) +} + +impl syn::parse::Parse for PalletEventAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + let lookahead = content.lookahead1(); + if lookahead.peek(keyword::metadata) { + let span = content.parse::()?.span(); + let metadata_content; + syn::parenthesized!(metadata_content in content); + + let metadata = metadata_content + .parse_terminated::<_, syn::Token![,]>(parse_event_metadata_element)? + .into_pairs() + .map(syn::punctuated::Pair::into_value) + .collect(); + + Ok(PalletEventAttr::Metadata { metadata, span }) + } else if lookahead.peek(keyword::generate_deposit) { + let span = content.parse::()?.span(); + + let generate_content; + syn::parenthesized!(generate_content in content); + let fn_vis = generate_content.parse::()?; + generate_content.parse::()?; + let fn_span = generate_content.parse::()?.span(); + + + Ok(PalletEventAttr::DepositEvent { fn_vis, span, fn_span }) + } else { + Err(lookahead.error()) + } + } +} + +struct PalletEventAttrInfo { + metadata: Option>, + deposit_event: Option<(syn::Visibility, proc_macro2::Span)>, +} + +impl PalletEventAttrInfo { + fn from_attrs(attrs: Vec) -> syn::Result { + let mut metadata = None; + let mut deposit_event = None; + for attr in attrs { + match attr { + PalletEventAttr::Metadata { metadata: m, .. } if metadata.is_none() => + metadata = Some(m), + PalletEventAttr::DepositEvent { fn_vis, fn_span, .. 
} if deposit_event.is_none() => + deposit_event = Some((fn_vis, fn_span)), + attr => { + return Err(syn::Error::new(attr.span(), "Duplicate attribute")); + } + } + } + + Ok(PalletEventAttrInfo { metadata, deposit_event }) + } +} + +impl EventDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Enum(item) = item { + item + } else { + return Err(syn::Error::new(item.span(), "Invalid pallet::event, expected item enum")) + }; + + let event_attrs: Vec = helper::take_item_attrs(&mut item.attrs)?; + let attr_info = PalletEventAttrInfo::from_attrs(event_attrs)?; + let metadata = attr_info.metadata.unwrap_or_else(Vec::new); + let deposit_event = attr_info.deposit_event; + + if !matches!(item.vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::event, `Error` must be public"; + return Err(syn::Error::new(item.span(), msg)); + } + + let where_clause = item.generics.where_clause.clone(); + + let mut instances = vec![]; + // NOTE: Event is not allowed to be only generic on I because it is not supported + // by construct_runtime. + if let Some(u) = helper::check_type_def_optional_gen(&item.generics, item.ident.span())? { + instances.push(u); + } else { + // construct_runtime only allow non generic event for non instantiable pallet. + instances.push(helper::InstanceUsage { + has_instance: false, + span: item.ident.span(), + }) + } + + let has_instance = item.generics.type_params().any(|t| t.ident == "I"); + let has_config = item.generics.type_params().any(|t| t.ident == "T"); + let gen_kind = super::GenericKind::from_gens(has_config, has_instance) + .expect("Checked by `helper::check_type_def_optional_gen` above"); + + let event = syn::parse2::(item.ident.to_token_stream())?; + + let metadata = item.variants.iter() + .map(|variant| { + let name = variant.ident.clone(); + let docs = helper::get_doc_literals(&variant.attrs); + let args = variant.fields.iter() + .map(|field| { + metadata.iter().find(|m| m.0 == field.ty) + .map(|m| m.1.clone()) + .unwrap_or_else(|| { + clean_type_string(&field.ty.to_token_stream().to_string()) + }) + }) + .collect(); + + (name, args, docs) + }) + .collect(); + + Ok(EventDef { + attr_span, + index, + metadata, + instances, + deposit_event, + event, + gen_kind, + where_clause, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/extra_constants.rs b/frame/support/procedural/src/pallet/parse/extra_constants.rs new file mode 100644 index 0000000000000..430bf94783774 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/extra_constants.rs @@ -0,0 +1,121 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; + +/// List of additional token to be used for parsing. 
+mod keyword { + syn::custom_keyword!(DispatchResultWithPostInfo); + syn::custom_keyword!(Call); + syn::custom_keyword!(OriginFor); + syn::custom_keyword!(weight); + syn::custom_keyword!(compact); + syn::custom_keyword!(T); + syn::custom_keyword!(pallet); +} + +/// Definition of extra constants typically `impl Pallet { ... }` +pub struct ExtraConstantsDef { + /// The where_clause used. + pub where_clause: Option, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The index of call item in pallet module. + pub index: usize, + /// The extra constant defined. + pub extra_constants: Vec, +} + +/// Input definition for an constant in pallet. +pub struct ExtraConstantDef { + /// Name of the function + pub ident: syn::Ident, + /// The type returned by the function + pub type_: syn::Type, + /// The doc associated + pub doc: Vec, +} + +impl ExtraConstantsDef { + pub fn try_from( + index: usize, + item: &mut syn::Item + ) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")); + }; + + let mut instances = vec![]; + instances.push(helper::check_impl_gen(&item.generics, item.impl_token.span())?); + instances.push(helper::check_pallet_struct_usage(&item.self_ty)?); + + if let Some((_, _, for_)) = item.trait_ { + let msg = "Invalid pallet::call, expected no trait ident as in \ + `impl<..> Pallet<..> { .. }`"; + return Err(syn::Error::new(for_.span(), msg)) + } + + let mut extra_constants = vec![]; + for impl_item in &mut item.items { + let method = if let syn::ImplItem::Method(method) = impl_item { + method + } else { + let msg = "Invalid pallet::call, only method accepted"; + return Err(syn::Error::new(impl_item.span(), msg)); + }; + + if !method.sig.inputs.is_empty() { + let msg = "Invalid pallet::extra_constants, method must have 0 args"; + return Err(syn::Error::new(method.sig.span(), msg)); + } + + if !method.sig.generics.params.is_empty() { + let msg = "Invalid pallet::extra_constants, method must have 0 generics"; + return Err(syn::Error::new(method.sig.generics.params[0].span(), msg)); + } + + if method.sig.generics.where_clause.is_some() { + let msg = "Invalid pallet::extra_constants, method must have no where clause"; + return Err(syn::Error::new(method.sig.generics.where_clause.span(), msg)); + } + + let type_ = match &method.sig.output { + syn::ReturnType::Default => { + let msg = "Invalid pallet::extra_constants, method must have a return type"; + return Err(syn::Error::new(method.span(), msg)); + }, + syn::ReturnType::Type(_, type_) => *type_.clone(), + }; + + extra_constants.push(ExtraConstantDef { + ident: method.sig.ident.clone(), + type_, + doc: helper::get_doc_literals(&method.attrs), + }); + } + + Ok(Self { + index, + instances, + where_clause: item.generics.where_clause.clone(), + extra_constants, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/genesis_build.rs b/frame/support/procedural/src/pallet/parse/genesis_build.rs new file mode 100644 index 0000000000000..1438c400b17f1 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/genesis_build.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use syn::spanned::Spanned; +use super::helper; + +/// Definition for pallet genesis build implementation. +pub struct GenesisBuildDef { + /// The index of item in pallet module. + pub index: usize, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The where_clause used. + pub where_clause: Option, + /// The span of the pallet::genesis_build attribute. + pub attr_span: proc_macro2::Span, +} + +impl GenesisBuildDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + let msg = "Invalid pallet::genesis_build, expected item impl"; + return Err(syn::Error::new(item.span(), msg)); + }; + + let item_trait = &item.trait_.as_ref() + .ok_or_else(|| { + let msg = "Invalid pallet::genesis_build, expected impl<..> GenesisBuild<..> \ + for GenesisConfig<..>"; + syn::Error::new(item.span(), msg) + })?.1; + + let mut instances = vec![]; + instances.push(helper::check_genesis_builder_usage(&item_trait)?); + + Ok(Self { + attr_span, + index, + instances, + where_clause: item.generics.where_clause.clone(), + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/genesis_config.rs b/frame/support/procedural/src/pallet/parse/genesis_config.rs new file mode 100644 index 0000000000000..729d1241390a5 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/genesis_config.rs @@ -0,0 +1,78 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use syn::spanned::Spanned; +use super::helper; + +/// Definition for pallet genesis config type. +/// +/// Either: +/// * `struct GenesisConfig` +/// * `enum GenesisConfig` +pub struct GenesisConfigDef { + /// The index of item in pallet module. + pub index: usize, + /// The kind of generic the type `GenesisConfig` has. + pub gen_kind: super::GenericKind, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The ident of genesis_config, can be used for span. 
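+	///
+	/// E.g. for a hypothetical definition such as
+	/// `pub struct GenesisConfig<T: Config> { pub initial_members: Vec<T::AccountId> }`
+	/// this is the ident `GenesisConfig` (the field name here is only illustrative).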
+ pub genesis_config: syn::Ident, +} + +impl GenesisConfigDef { + pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + let item_span = item.span(); + let (vis, ident, generics) = match &item { + syn::Item::Enum(item) => (&item.vis, &item.ident, &item.generics), + syn::Item::Struct(item) => (&item.vis, &item.ident, &item.generics), + _ => { + let msg = "Invalid pallet::genesis_config, expected enum or struct"; + return Err(syn::Error::new(item.span(), msg)); + }, + }; + + let mut instances = vec![]; + // NOTE: GenesisConfig is not allowed to be only generic on I because it is not supported + // by construct_runtime. + if let Some(u) = helper::check_type_def_optional_gen(&generics, ident.span())? { + instances.push(u); + } + + let has_instance = generics.type_params().any(|t| t.ident == "I"); + let has_config = generics.type_params().any(|t| t.ident == "T"); + let gen_kind = super::GenericKind::from_gens(has_config, has_instance) + .expect("Checked by `helper::check_type_def_optional_gen` above"); + + if !matches!(vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::genesis_config, GenesisConfig must be public"; + return Err(syn::Error::new(item_span, msg)); + } + + if ident != "GenesisConfig" { + let msg = "Invalid pallet::genesis_config, ident must `GenesisConfig`"; + return Err(syn::Error::new(ident.span(), msg)); + } + + Ok(GenesisConfigDef { + index, + genesis_config: ident.clone(), + instances, + gen_kind, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/helper.rs b/frame/support/procedural/src/pallet/parse/helper.rs new file mode 100644 index 0000000000000..96ab33bb65eeb --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/helper.rs @@ -0,0 +1,598 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use syn::spanned::Spanned; +use quote::ToTokens; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(I); + syn::custom_keyword!(compact); + syn::custom_keyword!(GenesisBuild); + syn::custom_keyword!(Config); + syn::custom_keyword!(T); + syn::custom_keyword!(Pallet); + syn::custom_keyword!(origin); +} + +/// A usage of instance, either the trait `Config` has been used with instance or without instance. +/// Used to check for consistency. +#[derive(Clone)] +pub struct InstanceUsage { + pub has_instance: bool, + pub span: proc_macro2::Span, +} + +/// Trait implemented for syn items to get mutable references on their attributes. +/// +/// NOTE: verbatim variants are not supported. +pub trait MutItemAttrs { + fn mut_item_attrs(&mut self) -> Option<&mut Vec>; +} + +/// Take the first pallet attribute (e.g. 
attribute like `#[pallet..]`) and decode it to `Attr` +pub fn take_first_item_attr(item: &mut impl MutItemAttrs) -> syn::Result> where + Attr: syn::parse::Parse, +{ + let attrs = if let Some(attrs) = item.mut_item_attrs() { + attrs + } else { + return Ok(None) + }; + + if let Some(index) = attrs.iter() + .position(|attr| + attr.path.segments.first().map_or(false, |segment| segment.ident == "pallet") + ) + { + let pallet_attr = attrs.remove(index); + Ok(Some(syn::parse2(pallet_attr.into_token_stream())?)) + } else { + Ok(None) + } +} + +/// Take all the pallet attributes (e.g. attribute like `#[pallet..]`) and decode them to `Attr` +pub fn take_item_attrs(item: &mut impl MutItemAttrs) -> syn::Result> where + Attr: syn::parse::Parse, +{ + let mut pallet_attrs = Vec::new(); + + while let Some(attr) = take_first_item_attr(item)? { + pallet_attrs.push(attr) + } + + Ok(pallet_attrs) +} + +impl MutItemAttrs for syn::Item { + fn mut_item_attrs(&mut self) -> Option<&mut Vec> { + match self { + Self::Const(item) => Some(item.attrs.as_mut()), + Self::Enum(item) => Some(item.attrs.as_mut()), + Self::ExternCrate(item) => Some(item.attrs.as_mut()), + Self::Fn(item) => Some(item.attrs.as_mut()), + Self::ForeignMod(item) => Some(item.attrs.as_mut()), + Self::Impl(item) => Some(item.attrs.as_mut()), + Self::Macro(item) => Some(item.attrs.as_mut()), + Self::Macro2(item) => Some(item.attrs.as_mut()), + Self::Mod(item) => Some(item.attrs.as_mut()), + Self::Static(item) => Some(item.attrs.as_mut()), + Self::Struct(item) => Some(item.attrs.as_mut()), + Self::Trait(item) => Some(item.attrs.as_mut()), + Self::TraitAlias(item) => Some(item.attrs.as_mut()), + Self::Type(item) => Some(item.attrs.as_mut()), + Self::Union(item) => Some(item.attrs.as_mut()), + Self::Use(item) => Some(item.attrs.as_mut()), + _ => None, + } + } +} + + +impl MutItemAttrs for syn::TraitItem { + fn mut_item_attrs(&mut self) -> Option<&mut Vec> { + match self { + Self::Const(item) => Some(item.attrs.as_mut()), + Self::Method(item) => Some(item.attrs.as_mut()), + Self::Type(item) => Some(item.attrs.as_mut()), + Self::Macro(item) => Some(item.attrs.as_mut()), + _ => None, + } + } +} + +impl MutItemAttrs for Vec { + fn mut_item_attrs(&mut self) -> Option<&mut Vec> { + Some(self) + } +} + +impl MutItemAttrs for syn::ItemMod { + fn mut_item_attrs(&mut self) -> Option<&mut Vec> { + Some(&mut self.attrs) + } +} + +/// Return all doc attributes literals found. 
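+///
+/// For example, `/// Some doc.` desugars to `#[doc = " Some doc."]`, so this would collect
+/// the literal `" Some doc."`; non-doc attributes are ignored (illustrative example).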
+pub fn get_doc_literals(attrs: &Vec) -> Vec { + attrs.iter() + .filter_map(|attr| { + if let Ok(syn::Meta::NameValue(meta)) = attr.parse_meta() { + if meta.path.get_ident().map_or(false, |ident| ident == "doc") { + Some(meta.lit) + } else { + None + } + } else { + None + } + }) + .collect() +} + +/// Parse for `()` +struct Unit; +impl syn::parse::Parse for Unit { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let content; + syn::parenthesized!(content in input); + if !content.is_empty() { + let msg = "unexpected tokens, expected nothing inside parenthesis as `()`"; + return Err(syn::Error::new(content.span(), msg)); + } + Ok(Self) + } +} + +/// Parse for `'static` +struct StaticLifetime; +impl syn::parse::Parse for StaticLifetime { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let lifetime = input.parse::()?; + if lifetime.ident != "static" { + let msg = "unexpected tokens, expected `static`"; + return Err(syn::Error::new(lifetime.ident.span(), msg)); + } + Ok(Self) + } +} + +/// Check the syntax: `I: 'static = ()` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return the instance if found. +pub fn check_config_def_gen( + gen: &syn::Generics, + span: proc_macro2::Span, +) -> syn::Result<()> { + let expected = "expected `I: 'static = ()`"; + pub struct CheckTraitDefGenerics; + impl syn::parse::Parse for CheckTraitDefGenerics { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self) + } + } + + syn::parse2::(gen.params.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid generics: {}", expected); + let mut err = syn::Error::new(span, msg); + err.combine(e); + err + })?; + + Ok(()) +} + +/// Check the syntax: +/// * either `T` +/// * or `T, I = ()` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return the instance if found. +pub fn check_type_def_gen_no_bounds( + gen: &syn::Generics, + span: proc_macro2::Span, +) -> syn::Result { + let expected = "expected `T` or `T, I = ()`"; + pub struct Checker(InstanceUsage); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut instance_usage = InstanceUsage { + has_instance: false, + span: input.span(), + }; + + input.parse::()?; + if input.peek(syn::Token![,]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + } + + Ok(Self(instance_usage)) + } + } + + let i = syn::parse2::(gen.params.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid type def generics: {}", expected); + let mut err = syn::Error::new(span, msg); + err.combine(e); + err + })?.0; + + Ok(i) +} + +/// Check the syntax: +/// * either `` (no generics +/// * or `T` +/// * or `T: Config` +/// * or `T, I = ()` +/// * or `T: Config, I: 'static = ()` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return some instance usage if there is some generic, or none otherwise. 
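+///
+/// Illustrative inputs (not taken from this patch): the generics of
+/// `pub enum Event<T: Config<I>, I: 'static = ()>` are accepted and reported as using an
+/// instance, while the generics of something like `pub struct Foo<U>` are rejected.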
+pub fn check_type_def_optional_gen( + gen: &syn::Generics, + span: proc_macro2::Span, +) -> syn::Result> { + let expected = "expected `` or `T` or `T: Config` or `T, I = ()` or \ + `T: Config, I: 'static = ()`"; + pub struct Checker(Option); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + if input.is_empty() { + return Ok(Self(None)) + } + + let mut instance_usage = InstanceUsage { + span: input.span(), + has_instance: false, + }; + + input.parse::()?; + + if input.is_empty() { + return Ok(Self(Some(instance_usage))) + } + + let lookahead = input.lookahead1(); + if lookahead.peek(syn::Token![,]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self(Some(instance_usage))) + } else if lookahead.peek(syn::Token![:]) { + input.parse::()?; + input.parse::()?; + + if input.is_empty() { + return Ok(Self(Some(instance_usage))) + } + + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self(Some(instance_usage))) + } else { + Err(lookahead.error()) + } + } + } + + let i = syn::parse2::(gen.params.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid type def generics: {}", expected); + let mut err = syn::Error::new(span, msg); + err.combine(e); + err + })?.0 + // Span can be call_site if generic is empty. Thus we replace it. + .map(|mut i| { i.span = span; i }); + + Ok(i) +} + +/// Check the syntax: +/// * either `Pallet` +/// * or `Pallet` +/// +/// return the instance if found. +pub fn check_pallet_struct_usage(type_: &Box) -> syn::Result { + let expected = "expected `Pallet` or `Pallet`"; + pub struct Checker(InstanceUsage); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut instance_usage = InstanceUsage { + span: input.span(), + has_instance: false, + }; + + input.parse::()?; + input.parse::()?; + input.parse::()?; + if input.peek(syn::Token![,]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + } + input.parse::]>()?; + + Ok(Self(instance_usage)) + } + } + + let i = syn::parse2::(type_.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid pallet struct: {}", expected); + let mut err = syn::Error::new(type_.span(), msg); + err.combine(e); + err + })?.0; + + Ok(i) +} + +/// Check the generic is: +/// * either `T: Config` +/// * or `T: Config, I: 'static` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return whether it contains instance. 
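+///
+/// For instance, the generics of `impl<T: Config<I>, I: 'static> Pallet<T, I>` are accepted
+/// (with instance), those of `impl<T: Config> Pallet<T>` are accepted (without instance), and
+/// anything else, e.g. `impl<T> Pallet<T>`, is rejected (illustrative examples).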
+pub fn check_impl_gen( + gen: &syn::Generics, + span: proc_macro2::Span +) -> syn::Result { + let expected = "expected `impl` or `impl, I: 'static>`"; + pub struct Checker(InstanceUsage); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut instance_usage = InstanceUsage { + span: input.span(), + has_instance: false, + }; + + input.parse::()?; + input.parse::()?; + input.parse::()?; + if input.peek(syn::Token![<]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + } + + Ok(Self(instance_usage)) + } + } + + let i = syn::parse2::(gen.params.to_token_stream()) + .map_err(|e| { + let mut err = syn::Error::new(span, format!("Invalid generics: {}", expected)); + err.combine(e); + err + })?.0; + + Ok(i) +} + +/// Check the syntax: +/// * or `T` +/// * or `T: Config` +/// * or `T, I = ()` +/// * or `T: Config, I: 'static = ()` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return the instance if found. +pub fn check_type_def_gen( + gen: &syn::Generics, + span: proc_macro2::Span, +) -> syn::Result { + let expected = "expected `T` or `T: Config` or `T, I = ()` or \ + `T: Config, I: 'static = ()`"; + pub struct Checker(InstanceUsage); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut instance_usage = InstanceUsage { + span: input.span(), + has_instance: false, + }; + + input.parse::()?; + + if input.is_empty() { + return Ok(Self(instance_usage)) + } + + let lookahead = input.lookahead1(); + if lookahead.peek(syn::Token![,]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self(instance_usage)) + } else if lookahead.peek(syn::Token![:]) { + input.parse::()?; + input.parse::()?; + + if input.is_empty() { + return Ok(Self(instance_usage)) + } + + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self(instance_usage)) + } else { + Err(lookahead.error()) + } + } + } + + let mut i = syn::parse2::(gen.params.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid type def generics: {}", expected); + let mut err = syn::Error::new(span, msg); + err.combine(e); + err + })?.0; + + // Span can be call_site if generic is empty. Thus we replace it. + i.span = span; + + Ok(i) +} + +/// Check the syntax: +/// * either `GenesisBuild` +/// * or `GenesisBuild` +/// +/// return the instance if found. 
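+///
+/// E.g. the path `GenesisBuild<T, I>` reports an instance usage, `GenesisBuild<T>` reports
+/// none, and an unrelated path would be rejected (illustrative examples).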
+pub fn check_genesis_builder_usage(type_: &syn::Path) -> syn::Result { + let expected = "expected `GenesisBuild` or `GenesisBuild`"; + pub struct Checker(InstanceUsage); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut instance_usage = InstanceUsage { + span: input.span(), + has_instance: false, + }; + + input.parse::()?; + input.parse::()?; + input.parse::()?; + if input.peek(syn::Token![,]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + } + input.parse::]>()?; + + Ok(Self(instance_usage)) + } + } + + let i = syn::parse2::(type_.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid genesis builder: {}", expected); + let mut err = syn::Error::new(type_.span(), msg); + err.combine(e); + err + })?.0; + + Ok(i) +} + +/// Check the syntax: +/// * either `` (no generics) +/// * or `T: Config` +/// * or `T: Config, I: 'static` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return the instance if found. +pub fn check_type_value_gen( + gen: &syn::Generics, + span: proc_macro2::Span, +) -> syn::Result> { + let expected = "expected `` or `T: Config` or `T: Config, I: 'static`"; + pub struct Checker(Option); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + if input.is_empty() { + return Ok(Self(None)) + } + + input.parse::()?; + input.parse::()?; + input.parse::()?; + + let mut instance_usage = InstanceUsage { + span: input.span(), + has_instance: false, + }; + + if input.is_empty() { + return Ok(Self(Some(instance_usage))) + } + + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self(Some(instance_usage))) + } + } + + let i = syn::parse2::(gen.params.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid type def generics: {}", expected); + let mut err = syn::Error::new(span, msg); + err.combine(e); + err + })?.0 + // Span can be call_site if generic is empty. Thus we replace it. + .map(|mut i| { i.span = span; i }); + + Ok(i) +} diff --git a/frame/support/procedural/src/pallet/parse/hooks.rs b/frame/support/procedural/src/pallet/parse/hooks.rs new file mode 100644 index 0000000000000..585222060e5f4 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/hooks.rs @@ -0,0 +1,76 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use syn::spanned::Spanned; +use super::helper; + +/// Implementation of the pallet hooks. +pub struct HooksDef { + /// The index of item in pallet. + pub index: usize, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The where_clause used. + pub where_clause: Option, + /// The span of the pallet::hooks attribute. 
+ pub attr_span: proc_macro2::Span, +} + +impl HooksDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + let msg = "Invalid pallet::hooks, expected item impl"; + return Err(syn::Error::new(item.span(), msg)); + }; + + let mut instances = vec![]; + instances.push(helper::check_pallet_struct_usage(&item.self_ty)?); + instances.push(helper::check_impl_gen(&item.generics, item.impl_token.span())?); + + let item_trait = &item.trait_.as_ref() + .ok_or_else(|| { + let msg = "Invalid pallet::hooks, expected impl<..> Hooks \ + for Pallet<..>"; + syn::Error::new(item.span(), msg) + })?.1; + + if item_trait.segments.len() != 1 + || item_trait.segments[0].ident != "Hooks" + { + let msg = format!( + "Invalid pallet::hooks, expected trait to be `Hooks` found `{}`\ + , you can import from `frame_support::pallet_prelude`", + quote::quote!(#item_trait) + ); + + return Err(syn::Error::new(item_trait.span(), msg)); + } + + Ok(Self { + attr_span, + index, + instances, + where_clause: item.generics.where_clause.clone(), + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/inherent.rs b/frame/support/procedural/src/pallet/parse/inherent.rs new file mode 100644 index 0000000000000..a3f12b1574981 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/inherent.rs @@ -0,0 +1,59 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use syn::spanned::Spanned; +use super::helper; + +/// The definition of the pallet inherent implementation. +pub struct InherentDef { + /// The index of inherent item in pallet module. + pub index: usize, + /// A set of usage of instance, must be check for consistency with trait. 
+ pub instances: Vec, +} + +impl InherentDef { + pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + let msg = "Invalid pallet::inherent, expected item impl"; + return Err(syn::Error::new(item.span(), msg)); + }; + + if item.trait_.is_none() { + let msg = "Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..>"; + return Err(syn::Error::new(item.span(), msg)); + } + + if let Some(last) = item.trait_.as_ref().unwrap().1.segments.last() { + if last.ident != "ProvideInherent" { + let msg = "Invalid pallet::inherent, expected trait ProvideInherent"; + return Err(syn::Error::new(last.span(), msg)); + } + } else { + let msg = "Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..>"; + return Err(syn::Error::new(item.span(), msg)); + } + + let mut instances = vec![]; + instances.push(helper::check_pallet_struct_usage(&item.self_ty)?); + instances.push(helper::check_impl_gen(&item.generics, item.impl_token.span())?); + + Ok(InherentDef { index, instances }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/mod.rs b/frame/support/procedural/src/pallet/parse/mod.rs new file mode 100644 index 0000000000000..4d8f239ded0af --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/mod.rs @@ -0,0 +1,465 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Parse for pallet macro. +//! +//! Parse the module into `Def` struct through `Def::try_from` function. + +pub mod config; +pub mod pallet_struct; +pub mod hooks; +pub mod call; +pub mod error; +pub mod origin; +pub mod inherent; +pub mod storage; +pub mod event; +pub mod helper; +pub mod genesis_config; +pub mod genesis_build; +pub mod validate_unsigned; +pub mod type_value; +pub mod extra_constants; + +use syn::spanned::Spanned; +use frame_support_procedural_tools::generate_crate_access_2018; + +/// Parsed definition of a pallet. +pub struct Def { + /// The module items. + /// (their order must not be modified because they are registered in individual definitions). 
+ pub item: syn::ItemMod, + pub config: config::ConfigDef, + pub pallet_struct: pallet_struct::PalletStructDef, + pub hooks: hooks::HooksDef, + pub call: call::CallDef, + pub storages: Vec, + pub error: Option, + pub event: Option, + pub origin: Option, + pub inherent: Option, + pub genesis_config: Option, + pub genesis_build: Option, + pub validate_unsigned: Option, + pub extra_constants: Option, + pub type_values: Vec, + pub frame_system: syn::Ident, + pub frame_support: syn::Ident, +} + +impl Def { + pub fn try_from(mut item: syn::ItemMod) -> syn::Result { + let frame_system = generate_crate_access_2018("frame-system")?; + let frame_support = generate_crate_access_2018("frame-support")?; + + let item_span = item.span(); + let items = &mut item.content.as_mut() + .ok_or_else(|| { + let msg = "Invalid pallet definition, expected mod to be inlined."; + syn::Error::new(item_span, msg) + })?.1; + + let mut config = None; + let mut pallet_struct = None; + let mut hooks = None; + let mut call = None; + let mut error = None; + let mut event = None; + let mut origin = None; + let mut inherent = None; + let mut genesis_config = None; + let mut genesis_build = None; + let mut validate_unsigned = None; + let mut extra_constants = None; + let mut storages = vec![]; + let mut type_values = vec![]; + + for (index, item) in items.iter_mut().enumerate() { + let pallet_attr: Option = helper::take_first_item_attr(item)?; + + match pallet_attr { + Some(PalletAttr::Config(span)) if config.is_none() => + config = Some(config::ConfigDef::try_from(&frame_system, span, index, item)?), + Some(PalletAttr::Pallet(span)) if pallet_struct.is_none() => { + let p = pallet_struct::PalletStructDef::try_from(span, index, item)?; + pallet_struct = Some(p); + }, + Some(PalletAttr::Hooks(span)) if hooks.is_none() => { + let m = hooks::HooksDef::try_from(span, index, item)?; + hooks = Some(m); + }, + Some(PalletAttr::Call(span)) if call.is_none() => + call = Some(call::CallDef::try_from(span, index, item)?), + Some(PalletAttr::Error(span)) if error.is_none() => + error = Some(error::ErrorDef::try_from(span, index, item)?), + Some(PalletAttr::Event(span)) if event.is_none() => + event = Some(event::EventDef::try_from(span, index, item)?), + Some(PalletAttr::GenesisConfig(_)) if genesis_config.is_none() => { + let g = genesis_config::GenesisConfigDef::try_from(index, item)?; + genesis_config = Some(g); + }, + Some(PalletAttr::GenesisBuild(span)) if genesis_build.is_none() => { + let g = genesis_build::GenesisBuildDef::try_from(span, index, item)?; + genesis_build = Some(g); + }, + Some(PalletAttr::Origin(_)) if origin.is_none() => + origin = Some(origin::OriginDef::try_from(index, item)?), + Some(PalletAttr::Inherent(_)) if inherent.is_none() => + inherent = Some(inherent::InherentDef::try_from(index, item)?), + Some(PalletAttr::Storage(span)) => + storages.push(storage::StorageDef::try_from(span, index, item)?), + Some(PalletAttr::ValidateUnsigned(_)) if validate_unsigned.is_none() => { + let v = validate_unsigned::ValidateUnsignedDef::try_from(index, item)?; + validate_unsigned = Some(v); + }, + Some(PalletAttr::TypeValue(span)) => + type_values.push(type_value::TypeValueDef::try_from(span, index, item)?), + Some(PalletAttr::ExtraConstants(_)) => { + extra_constants = + Some(extra_constants::ExtraConstantsDef::try_from(index, item)?) 
+ }, + Some(attr) => { + let msg = "Invalid duplicated attribute"; + return Err(syn::Error::new(attr.span(), msg)); + }, + None => (), + } + } + + if genesis_config.is_some() != genesis_build.is_some() { + let msg = format!( + "`#[pallet::genesis_config]` and `#[pallet::genesis_build]` attributes must be \ + either both used or both not used, instead genesis_config is {} and genesis_build \ + is {}", + genesis_config.as_ref().map_or("unused", |_| "used"), + genesis_build.as_ref().map_or("unused", |_| "used"), + ); + return Err(syn::Error::new(item_span, msg)); + } + + let def = Def { + item, + config: config.ok_or_else(|| syn::Error::new(item_span, "Missing `#[pallet::config]`"))?, + pallet_struct: pallet_struct + .ok_or_else(|| syn::Error::new(item_span, "Missing `#[pallet::pallet]`"))?, + hooks: hooks + .ok_or_else(|| syn::Error::new(item_span, "Missing `#[pallet::hooks]`"))?, + call: call.ok_or_else(|| syn::Error::new(item_span, "Missing `#[pallet::call]"))?, + extra_constants, + genesis_config, + genesis_build, + validate_unsigned, + error, + event, + origin, + inherent, + storages, + type_values, + frame_system, + frame_support, + }; + + def.check_instance_usage()?; + def.check_event_usage()?; + + Ok(def) + } + + /// Check that usage of trait `Event` is consistent with the definition, i.e. it is declared + /// and trait defines type Event, or not declared and no trait associated type. + fn check_event_usage(&self) -> syn::Result<()> { + match ( + self.config.has_event_type, + self.event.is_some(), + ) { + (true, false) => { + let msg = "Invalid usage of Event, `Config` contains associated type `Event`, \ + but enum `Event` is not declared (i.e. no use of `#[pallet::event]`). \ + Note that type `Event` in trait is reserved to work alongside pallet event."; + Err(syn::Error::new(proc_macro2::Span::call_site(), msg)) + }, + (false, true) => { + let msg = "Invalid usage of Event, `Config` contains no associated type \ + `Event`, but enum `Event` is declared (in use of `#[pallet::event]`). \ + An Event associated type must be declare on trait `Config`."; + Err(syn::Error::new(proc_macro2::Span::call_site(), msg)) + }, + _ => Ok(()) + } + } + + /// Check that usage of trait `Config` is consistent with the definition, i.e. it is used with + /// instance iff it is defined with instance. 
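+	///
+	/// For instance (hypothetical), a pallet whose trait is declared as
+	/// `pub trait Config<I: 'static = ()>: frame_system::Config` but whose call impl is written
+	/// as `impl<T: Config> Pallet<T>` would be reported here as an inconsistent generic
+	/// declaration.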
+ fn check_instance_usage(&self) -> syn::Result<()> { + let mut instances = vec![]; + instances.extend_from_slice(&self.call.instances[..]); + instances.extend_from_slice(&self.pallet_struct.instances[..]); + instances.extend_from_slice(&self.hooks.instances[..]); + instances.extend(&mut self.storages.iter().flat_map(|s| s.instances.clone())); + if let Some(event) = &self.event { + instances.extend_from_slice(&event.instances[..]); + } + if let Some(error) = &self.error { + instances.extend_from_slice(&error.instances[..]); + } + if let Some(inherent) = &self.inherent { + instances.extend_from_slice(&inherent.instances[..]); + } + if let Some(origin) = &self.origin { + instances.extend_from_slice(&origin.instances[..]); + } + if let Some(genesis_config) = &self.genesis_config { + instances.extend_from_slice(&genesis_config.instances[..]); + } + if let Some(genesis_build) = &self.genesis_build { + instances.extend_from_slice(&genesis_build.instances[..]); + } + if let Some(extra_constants) = &self.extra_constants { + instances.extend_from_slice(&extra_constants.instances[..]); + } + + let mut errors = instances.into_iter() + .filter_map(|instances| { + if instances.has_instance == self.config.has_instance { + return None + } + let msg = if self.config.has_instance { + "Invalid generic declaration, trait is defined with instance but generic use none" + } else { + "Invalid generic declaration, trait is defined without instance but generic use \ + some" + }; + Some(syn::Error::new(instances.span, msg)) + }); + + if let Some(mut first_error) = errors.next() { + for error in errors { + first_error.combine(error) + } + Err(first_error) + } else { + Ok(()) + } + } + + /// Depending on if pallet is instantiable: + /// * either `T: Config` + /// * or `T: Config, I: 'static` + pub fn type_impl_generics(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream { + if self.config.has_instance { + quote::quote_spanned!(span => T: Config, I: 'static) + } else { + quote::quote_spanned!(span => T: Config) + } + } + + /// Depending on if pallet is instantiable: + /// * either `T: Config` + /// * or `T: Config, I: 'static = ()` + pub fn type_decl_bounded_generics(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream { + if self.config.has_instance { + quote::quote_spanned!(span => T: Config, I: 'static = ()) + } else { + quote::quote_spanned!(span => T: Config) + } + } + + /// Depending on if pallet is instantiable: + /// * either `T` + /// * or `T, I = ()` + pub fn type_decl_generics(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream { + if self.config.has_instance { + quote::quote_spanned!(span => T, I = ()) + } else { + quote::quote_spanned!(span => T) + } + } + + /// Depending on if pallet is instantiable: + /// * either `` + /// * or `` + /// to be used when using pallet trait `Config` + pub fn trait_use_generics(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream { + if self.config.has_instance { + quote::quote_spanned!(span => ) + } else { + quote::quote_spanned!(span => ) + } + } + + /// Depending on if pallet is instantiable: + /// * either `T` + /// * or `T, I` + pub fn type_use_generics(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream { + if self.config.has_instance { + quote::quote_spanned!(span => T, I) + } else { + quote::quote_spanned!(span => T) + } + } +} + +/// Some generic kind for type which can be not generic, or generic over config, +/// or generic over config and instance, but not generic only over instance. 
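+///
+/// Illustratively: a plain `GenesisConfig` maps to `GenericKind::None`, `GenesisConfig<T>`
+/// to `GenericKind::Config`, and `GenesisConfig<T, I>` to `GenericKind::ConfigAndInstance`;
+/// a type generic only over `I` has no valid kind.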
+pub enum GenericKind { + None, + Config, + ConfigAndInstance, +} + +impl GenericKind { + /// Return Err if it is only generics over instance but not over config. + pub fn from_gens(has_config: bool, has_instance: bool) -> Result { + match (has_config, has_instance) { + (false, false) => Ok(GenericKind::None), + (true, false) => Ok(GenericKind::Config), + (true, true) => Ok(GenericKind::ConfigAndInstance), + (false, true) => Err(()), + } + } + + /// Return the generic to be used when using the type. + /// + /// Depending on its definition it can be: ``, `T` or `T, I` + pub fn type_use_gen(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream { + match self { + GenericKind::None => quote::quote!(), + GenericKind::Config => quote::quote_spanned!(span => T), + GenericKind::ConfigAndInstance => quote::quote_spanned!(span => T, I), + } + } + + /// Return the generic to be used in `impl<..>` when implementing on the type. + pub fn type_impl_gen(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream { + match self { + GenericKind::None => quote::quote!(), + GenericKind::Config => quote::quote_spanned!(span => T: Config), + GenericKind::ConfigAndInstance => quote::quote_spanned!(span => T: Config, I: 'static), + } + } + + /// Return whereas the type has some generic. + pub fn is_generic(&self) -> bool { + match self { + GenericKind::None => false, + GenericKind::Config | GenericKind::ConfigAndInstance => true, + } + } +} + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(origin); + syn::custom_keyword!(call); + syn::custom_keyword!(event); + syn::custom_keyword!(config); + syn::custom_keyword!(hooks); + syn::custom_keyword!(inherent); + syn::custom_keyword!(error); + syn::custom_keyword!(storage); + syn::custom_keyword!(genesis_build); + syn::custom_keyword!(genesis_config); + syn::custom_keyword!(validate_unsigned); + syn::custom_keyword!(type_value); + syn::custom_keyword!(pallet); + syn::custom_keyword!(generate_store); + syn::custom_keyword!(Store); + syn::custom_keyword!(extra_constants); +} + +/// Parse attributes for item in pallet module +/// syntax must be `pallet::` (e.g. 
`#[pallet::config]`) +enum PalletAttr { + Config(proc_macro2::Span), + Pallet(proc_macro2::Span), + Hooks(proc_macro2::Span), + Call(proc_macro2::Span), + Error(proc_macro2::Span), + Event(proc_macro2::Span), + Origin(proc_macro2::Span), + Inherent(proc_macro2::Span), + Storage(proc_macro2::Span), + GenesisConfig(proc_macro2::Span), + GenesisBuild(proc_macro2::Span), + ValidateUnsigned(proc_macro2::Span), + TypeValue(proc_macro2::Span), + ExtraConstants(proc_macro2::Span), +} + +impl PalletAttr { + fn span(&self) -> proc_macro2::Span { + match self { + Self::Config(span) => *span, + Self::Pallet(span) => *span, + Self::Hooks(span) => *span, + Self::Call(span) => *span, + Self::Error(span) => *span, + Self::Event(span) => *span, + Self::Origin(span) => *span, + Self::Inherent(span) => *span, + Self::Storage(span) => *span, + Self::GenesisConfig(span) => *span, + Self::GenesisBuild(span) => *span, + Self::ValidateUnsigned(span) => *span, + Self::TypeValue(span) => *span, + Self::ExtraConstants(span) => *span, + } + } +} + +impl syn::parse::Parse for PalletAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + let lookahead = content.lookahead1(); + if lookahead.peek(keyword::config) { + Ok(PalletAttr::Config(content.parse::()?.span())) + } else if lookahead.peek(keyword::pallet) { + Ok(PalletAttr::Pallet(content.parse::()?.span())) + } else if lookahead.peek(keyword::hooks) { + Ok(PalletAttr::Hooks(content.parse::()?.span())) + } else if lookahead.peek(keyword::call) { + Ok(PalletAttr::Call(content.parse::()?.span())) + } else if lookahead.peek(keyword::error) { + Ok(PalletAttr::Error(content.parse::()?.span())) + } else if lookahead.peek(keyword::event) { + Ok(PalletAttr::Event(content.parse::()?.span())) + } else if lookahead.peek(keyword::origin) { + Ok(PalletAttr::Origin(content.parse::()?.span())) + } else if lookahead.peek(keyword::inherent) { + Ok(PalletAttr::Inherent(content.parse::()?.span())) + } else if lookahead.peek(keyword::storage) { + Ok(PalletAttr::Storage(content.parse::()?.span())) + } else if lookahead.peek(keyword::genesis_config) { + Ok(PalletAttr::GenesisConfig(content.parse::()?.span())) + } else if lookahead.peek(keyword::genesis_build) { + Ok(PalletAttr::GenesisBuild(content.parse::()?.span())) + } else if lookahead.peek(keyword::validate_unsigned) { + Ok(PalletAttr::ValidateUnsigned(content.parse::()?.span())) + } else if lookahead.peek(keyword::type_value) { + Ok(PalletAttr::TypeValue(content.parse::()?.span())) + } else if lookahead.peek(keyword::extra_constants) { + Ok(PalletAttr::ExtraConstants(content.parse::()?.span())) + } else { + Err(lookahead.error()) + } + } +} diff --git a/frame/support/procedural/src/pallet/parse/origin.rs b/frame/support/procedural/src/pallet/parse/origin.rs new file mode 100644 index 0000000000000..2b47978b808a8 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/origin.rs @@ -0,0 +1,80 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
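Before the origin parser that follows, a standalone sketch (depending only on `syn` and `proc-macro2`, not part of this diff) of the parsing technique `PalletAttr` relies on: custom keywords combined with `lookahead1` to dispatch on the attribute name and produce a helpful error otherwise:

use syn::parse::{Parse, ParseStream};
use syn::spanned::Spanned;

mod kw {
	syn::custom_keyword!(config);
	syn::custom_keyword!(hooks);
}

enum Attr {
	Config(proc_macro2::Span),
	Hooks(proc_macro2::Span),
}

impl Parse for Attr {
	fn parse(input: ParseStream) -> syn::Result<Self> {
		let lookahead = input.lookahead1();
		if lookahead.peek(kw::config) {
			Ok(Attr::Config(input.parse::<kw::config>()?.span()))
		} else if lookahead.peek(kw::hooks) {
			Ok(Attr::Hooks(input.parse::<kw::hooks>()?.span()))
		} else {
			// The lookahead error lists the expected keywords.
			Err(lookahead.error())
		}
	}
}

fn main() {
	assert!(matches!(syn::parse_str::<Attr>("config"), Ok(Attr::Config(_))));
	assert!(syn::parse_str::<Attr>("storage").is_err());
}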
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use syn::spanned::Spanned; +use super::helper; + +/// Definition of the pallet origin type. +/// +/// Either: +/// * `type Origin` +/// * `struct Origin` +/// * `enum Origin` +pub struct OriginDef { + /// The index of item in pallet module. + pub index: usize, + pub has_instance: bool, + pub is_generic: bool, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, +} + +impl OriginDef { + pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + let item_span = item.span(); + let (vis, ident, generics) = match &item { + syn::Item::Enum(item) => (&item.vis, &item.ident, &item.generics), + syn::Item::Struct(item) => (&item.vis, &item.ident, &item.generics), + syn::Item::Type(item) => (&item.vis, &item.ident, &item.generics), + _ => { + let msg = "Invalid pallet::origin, expected enum or struct or type"; + return Err(syn::Error::new(item.span(), msg)); + }, + }; + + let has_instance = generics.params.len() == 2; + let is_generic = !generics.params.is_empty(); + + let mut instances = vec![]; + if let Some(u) = helper::check_type_def_optional_gen(&generics, item.span())? { + instances.push(u); + } else { + // construct_runtime only allow generic event for instantiable pallet. + instances.push(helper::InstanceUsage { + has_instance: false, + span: ident.span(), + }) + } + + if !matches!(vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::origin, Origin must be public"; + return Err(syn::Error::new(item_span, msg)); + } + + if ident != "Origin" { + let msg = "Invalid pallet::origin, ident must `Origin`"; + return Err(syn::Error::new(ident.span(), msg)); + } + + Ok(OriginDef { + index, + has_instance, + is_generic, + instances, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/frame/support/procedural/src/pallet/parse/pallet_struct.rs new file mode 100644 index 0000000000000..1c979741d9803 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -0,0 +1,105 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; +use quote::ToTokens; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(pallet); + syn::custom_keyword!(Pallet); + syn::custom_keyword!(generate_store); + syn::custom_keyword!(Store); +} + +/// Definition of the pallet pallet. +pub struct PalletStructDef { + /// The index of item in pallet pallet. 
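Concretely, an item accepted by `OriginDef::try_from` above would look like this inside the pallet module (illustrative sketch; the rest of the pallet is omitted):

	#[pallet::origin]
	pub struct Origin<T>(core::marker::PhantomData<T>);

	// `pub enum Origin<T> { ... }` and `pub type Origin<T> = ...;` are accepted as well;
	// in every case the item must be `pub` and must be named exactly `Origin`.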
+ pub index: usize, + /// A set of usage of instance, must be check for consistency with config trait. + pub instances: Vec, + /// The keyword Pallet used (contains span). + pub pallet: keyword::Pallet, + /// Whether the trait `Store` must be generated. + pub store: Option<(syn::Visibility, keyword::Store)>, + /// The span of the pallet::pallet attribute. + pub attr_span: proc_macro2::Span, +} + +/// Parse for `#[pallet::generate_store($vis trait Store)]` +pub struct PalletStructAttr { + vis: syn::Visibility, + keyword: keyword::Store, +} + +impl syn::parse::Parse for PalletStructAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + content.parse::()?; + + let generate_content; + syn::parenthesized!(generate_content in content); + let vis = generate_content.parse::()?; + generate_content.parse::()?; + let keyword = generate_content.parse::()?; + Ok(Self { vis, keyword }) + } +} + +impl PalletStructDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Struct(item) = item { + item + } else { + let msg = "Invalid pallet::pallet, expected struct definition"; + return Err(syn::Error::new(item.span(), msg)); + }; + + let mut event_attrs: Vec = helper::take_item_attrs(&mut item.attrs)?; + if event_attrs.len() > 1 { + let msg = "Invalid pallet::pallet, multiple argument pallet::generate_store found"; + return Err(syn::Error::new(event_attrs[1].keyword.span(), msg)); + } + let store = event_attrs.pop().map(|attr| (attr.vis, attr.keyword)); + + let pallet = syn::parse2::(item.ident.to_token_stream())?; + + if !matches!(item.vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::pallet, Pallet must be public"; + return Err(syn::Error::new(item.span(), msg)); + } + + if item.generics.where_clause.is_some() { + let msg = "Invalid pallet::pallet, where clause not supported on Pallet declaration"; + return Err(syn::Error::new(item.generics.where_clause.span(), msg)); + } + + let mut instances = vec![]; + instances.push(helper::check_type_def_gen_no_bounds(&item.generics, item.ident.span())?); + + Ok(Self { index, instances, pallet, store, attr_span }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs new file mode 100644 index 0000000000000..c0da266cfca2b --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -0,0 +1,228 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; +use quote::ToTokens; + +/// List of additional token to be used for parsing. 
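For reference, the struct item that `PalletStructDef::try_from` expects, together with the optional `#[pallet::generate_store]` attribute parsed by `PalletStructAttr` (illustrative sketch):

	#[pallet::pallet]
	#[pallet::generate_store(pub(super) trait Store)]
	pub struct Pallet<T>(core::marker::PhantomData<T>);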
+mod keyword { + syn::custom_keyword!(Error); + syn::custom_keyword!(pallet); + syn::custom_keyword!(getter); + syn::custom_keyword!(OptionQuery); + syn::custom_keyword!(ValueQuery); +} + +/// Parse for `#[pallet::getter(fn dummy)]` +pub struct PalletStorageAttr { + getter: syn::Ident, +} + +impl syn::parse::Parse for PalletStorageAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + content.parse::()?; + + let generate_content; + syn::parenthesized!(generate_content in content); + generate_content.parse::()?; + Ok(Self { getter: generate_content.parse::()? }) + } +} + +/// The value and key types used by storages. Needed to expand metadata. +pub enum Metadata{ + Value { value: syn::GenericArgument }, + Map { value: syn::GenericArgument, key: syn::GenericArgument }, + DoubleMap { + value: syn::GenericArgument, + key1: syn::GenericArgument, + key2: syn::GenericArgument + }, +} + +pub enum QueryKind { + OptionQuery, + ValueQuery, +} + +/// Definition of a storage, storage is a storage type like +/// `type MyStorage = StorageValue` +/// The keys and values types are parsed in order to get metadata +pub struct StorageDef { + /// The index of error item in pallet module. + pub index: usize, + /// Visibility of the storage type. + pub vis: syn::Visibility, + /// The type ident, to generate the StoragePrefix for. + pub ident: syn::Ident, + /// The keys and value metadata of the storage. + pub metadata: Metadata, + /// The doc associated to the storage. + pub docs: Vec, + /// A set of usage of instance, must be check for consistency with config. + pub instances: Vec, + /// Optional getter to generate. If some then query_kind is ensured to be some as well. + pub getter: Option, + /// Whereas the querytype of the storage is OptionQuery or ValueQuery. + /// Note that this is best effort as it can't be determined when QueryKind is generic, and + /// result can be false if user do some unexpected type alias. + pub query_kind: Option, + /// Where clause of type definition. + pub where_clause: Option, + /// The span of the pallet::storage attribute. + pub attr_span: proc_macro2::Span, +} + +/// In `Foo` retrieve the argument at given position, i.e. A is argument at position 0. 
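Storage declarations of the shape this parser accepts look roughly like the following (illustrative sketch; names and value types are made up, and the leading `_` prefix argument is mandatory, as enforced further below):

	#[pallet::storage]
	#[pallet::getter(fn total_issuance)]
	pub type TotalIssuance<T> = StorageValue<_, u64, ValueQuery>;

	#[pallet::storage]
	pub type Accounts<T: Config> =
		StorageMap<_, Blake2_128Concat, T::AccountId, u64, OptionQuery>;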
+fn retrieve_arg( + segment: &syn::PathSegment, + arg_pos: usize, +) -> syn::Result { + if let syn::PathArguments::AngleBracketed(args) = &segment.arguments { + if arg_pos < args.args.len() { + Ok(args.args[arg_pos].clone()) + } else { + let msg = format!("pallet::storage unexpected number of generic argument, expected at \ + least {} args, found {}", arg_pos + 1, args.args.len()); + Err(syn::Error::new(args.span(), msg)) + } + } else { + let msg = format!("pallet::storage unexpected number of generic argument, expected at \ + least {} args, found none", arg_pos + 1); + Err(syn::Error::new(segment.span(), msg)) + } +} + +impl StorageDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Type(item) = item { + item + } else { + return Err(syn::Error::new(item.span(), "Invalid pallet::storage, expected item type")); + }; + + let mut attrs: Vec = helper::take_item_attrs(&mut item.attrs)?; + if attrs.len() > 1 { + let msg = "Invalid pallet::storage, multiple argument pallet::getter found"; + return Err(syn::Error::new(attrs[1].getter.span(), msg)); + } + let getter = attrs.pop().map(|attr| attr.getter); + + let mut instances = vec![]; + instances.push(helper::check_type_def_gen(&item.generics, item.ident.span())?); + + let where_clause = item.generics.where_clause.clone(); + let docs = helper::get_doc_literals(&item.attrs); + + let typ = if let syn::Type::Path(typ) = &*item.ty { + typ + } else { + let msg = "Invalid pallet::storage, expected type path"; + return Err(syn::Error::new(item.ty.span(), msg)); + }; + + if typ.path.segments.len() != 1 { + let msg = "Invalid pallet::storage, expected type path with one segment"; + return Err(syn::Error::new(item.ty.span(), msg)); + } + + let query_kind; + let metadata = match &*typ.path.segments[0].ident.to_string() { + "StorageValue" => { + query_kind = retrieve_arg(&typ.path.segments[0], 2); + Metadata::Value { + value: retrieve_arg(&typ.path.segments[0], 1)?, + } + } + "StorageMap" => { + query_kind = retrieve_arg(&typ.path.segments[0], 4); + Metadata::Map { + key: retrieve_arg(&typ.path.segments[0], 2)?, + value: retrieve_arg(&typ.path.segments[0], 3)?, + } + } + "StorageDoubleMap" => { + query_kind = retrieve_arg(&typ.path.segments[0], 6); + Metadata::DoubleMap { + key1: retrieve_arg(&typ.path.segments[0], 2)?, + key2: retrieve_arg(&typ.path.segments[0], 4)?, + value: retrieve_arg(&typ.path.segments[0], 5)?, + } + } + found => { + let msg = format!( + "Invalid pallet::storage, expected ident: `StorageValue` or \ + `StorageMap` or `StorageDoubleMap` in order to expand metadata, found \ + `{}`", + found, + ); + return Err(syn::Error::new(item.ty.span(), msg)); + } + }; + let query_kind = query_kind + .map(|query_kind| match query_kind { + syn::GenericArgument::Type(syn::Type::Path(path)) + if path.path.segments.last().map_or(false, |s| s.ident == "OptionQuery") + => Some(QueryKind::OptionQuery), + syn::GenericArgument::Type(syn::Type::Path(path)) + if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") + => Some(QueryKind::ValueQuery), + _ => None, + }) + .unwrap_or(Some(QueryKind::OptionQuery)); // This value must match the default generic. + + if query_kind.is_none() && getter.is_some() { + let msg = "Invalid pallet::storage, cannot generate getter because QueryKind is not \ + identifiable. 
QueryKind must be `OptionQuery`, `ValueQuery`, or default one to be \ + identifiable."; + return Err(syn::Error::new(getter.unwrap().span(), msg)); + } + + let prefix_arg = retrieve_arg(&typ.path.segments[0], 0)?; + syn::parse2::(prefix_arg.to_token_stream()) + .map_err(|e| { + let msg = "Invalid use of `#[pallet::storage]`, the type first generic argument \ + must be `_`, the final argument is automatically set by macro."; + let mut err = syn::Error::new(prefix_arg.span(), msg); + err.combine(e); + err + })?; + + Ok(StorageDef { + attr_span, + index, + vis: item.vis.clone(), + ident: item.ident.clone(), + instances, + metadata, + docs, + getter, + query_kind, + where_clause, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/type_value.rs b/frame/support/procedural/src/pallet/parse/type_value.rs new file mode 100644 index 0000000000000..58e6105818e01 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/type_value.rs @@ -0,0 +1,108 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; + +/// Definition of type value. Just a function which is expanded to a struct implementing `Get`. +pub struct TypeValueDef { + /// The index of error item in pallet module. + pub index: usize, + /// Visibility of the struct to generate. + pub vis: syn::Visibility, + /// Ident of the struct to generate. + pub ident: syn::Ident, + /// The type return by Get. + pub type_: Box, + /// The block returning the value to get + pub block: Box, + /// If type value is generic over `T` (or `T` and `I` for instantiable pallet) + pub is_generic: bool, + /// A set of usage of instance, must be check for consistency with config. + pub instances: Vec, + /// The where clause of the function. + pub where_clause: Option, + /// The span of the pallet::type_value attribute. 
+ pub attr_span: proc_macro2::Span, +} + +impl TypeValueDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Fn(item) = item { + item + } else { + let msg = "Invalid pallet::type_value, expected item fn"; + return Err(syn::Error::new(item.span(), msg)); + }; + + + if !item.attrs.is_empty() { + let msg = "Invalid pallet::type_value, unexpected attribute"; + return Err(syn::Error::new(item.attrs[0].span(), msg)); + } + + if let Some(span) = item.sig.constness.as_ref().map(|t| t.span()) + .or_else(|| item.sig.asyncness.as_ref().map(|t| t.span())) + .or_else(|| item.sig.unsafety.as_ref().map(|t| t.span())) + .or_else(|| item.sig.abi.as_ref().map(|t| t.span())) + .or_else(|| item.sig.variadic.as_ref().map(|t| t.span())) + { + let msg = "Invalid pallet::type_value, unexpected token"; + return Err(syn::Error::new(span, msg)); + } + + if !item.sig.inputs.is_empty() { + let msg = "Invalid pallet::type_value, unexpected argument"; + return Err(syn::Error::new(item.sig.inputs[0].span(), msg)); + } + + let vis = item.vis.clone(); + let ident = item.sig.ident.clone(); + let block = item.block.clone(); + let type_ = match item.sig.output.clone() { + syn::ReturnType::Type(_, type_) => type_, + syn::ReturnType::Default => { + let msg = "Invalid pallet::type_value, expected return type"; + return Err(syn::Error::new(item.sig.span(), msg)); + }, + }; + + let mut instances = vec![]; + if let Some(usage) = helper::check_type_value_gen(&item.sig.generics, item.sig.span())? { + instances.push(usage); + } + + let is_generic = item.sig.generics.type_params().count() > 0; + let where_clause = item.sig.generics.where_clause.clone(); + + Ok(TypeValueDef { + attr_span, + index, + is_generic, + vis, + ident, + block, + type_, + instances, + where_clause, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/validate_unsigned.rs b/frame/support/procedural/src/pallet/parse/validate_unsigned.rs new file mode 100644 index 0000000000000..0a406413f3940 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/validate_unsigned.rs @@ -0,0 +1,61 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use syn::spanned::Spanned; +use super::helper; + +/// The definition of the pallet validate unsigned implementation. +pub struct ValidateUnsignedDef { + /// The index of validate unsigned item in pallet module. + pub index: usize, + /// A set of usage of instance, must be check for consistency with config. 
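As an illustration, the function item `TypeValueDef::try_from` accepts, and how such a type value is typically used as a storage default (sketch; names are made up):

	#[pallet::type_value]
	pub fn DefaultLimit<T: Config>() -> u32 { 10 }

	#[pallet::storage]
	pub type Limit<T> = StorageValue<_, u32, ValueQuery, DefaultLimit<T>>;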
+ pub instances: Vec, +} + +impl ValidateUnsignedDef { + pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + let msg = "Invalid pallet::validate_unsigned, expected item impl"; + return Err(syn::Error::new(item.span(), msg)); + }; + + if item.trait_.is_none() { + let msg = "Invalid pallet::validate_unsigned, expected impl<..> ValidateUnsigned for \ + Pallet<..>"; + return Err(syn::Error::new(item.span(), msg)); + } + + if let Some(last) = item.trait_.as_ref().unwrap().1.segments.last() { + if last.ident != "ValidateUnsigned" { + let msg = "Invalid pallet::validate_unsigned, expected trait ValidateUnsigned"; + return Err(syn::Error::new(last.span(), msg)); + } + } else { + let msg = "Invalid pallet::validate_unsigned, expected impl<..> ValidateUnsigned for \ + Pallet<..>"; + return Err(syn::Error::new(item.span(), msg)); + } + + let mut instances = vec![]; + instances.push(helper::check_pallet_struct_usage(&item.self_ty)?); + instances.push(helper::check_impl_gen(&item.generics, item.impl_token.span())?); + + Ok(ValidateUnsignedDef { index, instances }) + } +} diff --git a/frame/support/procedural/src/pallet_version.rs b/frame/support/procedural/src/pallet_version.rs index ffd4b41208d56..0f3c478d4977a 100644 --- a/frame/support/procedural/src/pallet_version.rs +++ b/frame/support/procedural/src/pallet_version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +27,7 @@ use frame_support_procedural_tools::generate_crate_access_2018; /// The version is parsed into the requested destination type. fn get_version(version_env: &str) -> std::result::Result { let version = env::var(version_env) - .expect(&format!("`{}` is always set by cargo; qed", version_env)); + .unwrap_or_else(|_| panic!("`{}` is always set by cargo; qed", version_env)); T::from_str(&version).map_err(drop) } @@ -52,7 +52,7 @@ pub fn crate_to_pallet_version(input: proc_macro::TokenStream) -> Result("CARGO_PKG_VERSION_PATCH") .map_err(|_| create_error("Patch version needs to fit into `u8`"))?; - let crate_ = generate_crate_access_2018()?; + let crate_ = generate_crate_access_2018("frame-support")?; Ok(quote::quote! { #crate_::traits::PalletVersion { diff --git a/frame/support/procedural/src/partial_eq_no_bound.rs b/frame/support/procedural/src/partial_eq_no_bound.rs index df8d661a2b269..1c37be8021c9e 100644 --- a/frame/support/procedural/src/partial_eq_no_bound.rs +++ b/frame/support/procedural/src/partial_eq_no_bound.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/procedural/src/storage/genesis_config/builder_def.rs b/frame/support/procedural/src/storage/genesis_config/builder_def.rs index a045794529c95..0cbfa04787f78 100644 --- a/frame/support/procedural/src/storage/genesis_config/builder_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/builder_def.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
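The impl shape expected by `ValidateUnsignedDef::try_from` above is sketched below (only the last path segment `ValidateUnsigned` is checked; the body is illustrative, and the `ValidateUnsigned`, `TransactionSource`, `TransactionValidity` and `InvalidTransaction` types from `sp_runtime` are assumed to be in scope):

	#[pallet::validate_unsigned]
	impl<T: Config> ValidateUnsigned for Pallet<T> {
		type Call = Call<T>;

		fn validate_unsigned(
			_source: TransactionSource,
			_call: &Self::Call,
		) -> TransactionValidity {
			// Illustrative: reject every unsigned call.
			InvalidTransaction::Call.into()
		}
	}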
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs index 6339134ea0d22..300e47bc850ee 100644 --- a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -33,11 +33,11 @@ pub struct GenesisConfigFieldDef { pub struct GenesisConfigDef { pub is_generic: bool, pub fields: Vec, - /// For example: `, I: Instance=DefaultInstance>`. + /// For example: `, I: Instance=DefaultInstance>`. pub genesis_struct_decl: TokenStream, /// For example: ``. pub genesis_struct: TokenStream, - /// For example: `, I: Instance>`. + /// For example: `, I: Instance>`. pub genesis_impl: TokenStream, /// The where clause to use to constrain generics if genesis config is generic. pub genesis_where_clause: Option, diff --git a/frame/support/procedural/src/storage/genesis_config/mod.rs b/frame/support/procedural/src/storage/genesis_config/mod.rs index 27fbdd2cd38b5..87dfabcefbaaa 100644 --- a/frame/support/procedural/src/storage/genesis_config/mod.rs +++ b/frame/support/procedural/src/storage/genesis_config/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,8 +21,8 @@ use proc_macro2::{TokenStream, Span}; use quote::quote; use super::DeclStorageDefExt; -use genesis_config_def::GenesisConfigDef; -use builder_def::BuilderDef; +pub use genesis_config_def::GenesisConfigDef; +pub use builder_def::BuilderDef; mod genesis_config_def; mod builder_def; diff --git a/frame/support/procedural/src/storage/getters.rs b/frame/support/procedural/src/storage/getters.rs index 5507db4630596..65a3519033aa2 100644 --- a/frame/support/procedural/src/storage/getters.rs +++ b/frame/support/procedural/src/storage/getters.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/procedural/src/storage/instance_trait.rs b/frame/support/procedural/src/storage/instance_trait.rs index a28c3ae622082..5468c3d344193 100644 --- a/frame/support/procedural/src/storage/instance_trait.rs +++ b/frame/support/procedural/src/storage/instance_trait.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/procedural/src/storage/metadata.rs b/frame/support/procedural/src/storage/metadata.rs index 065320cd018ae..c321386ae1dc4 100644 --- a/frame/support/procedural/src/storage/metadata.rs +++ b/frame/support/procedural/src/storage/metadata.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index 0aa0a3cad7cd1..2f9625d2c941e 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,6 +24,9 @@ mod getters; mod metadata; mod instance_trait; mod genesis_config; +mod print_pallet_upgrade; + +pub(crate) use instance_trait::INHERENT_INSTANCE_NAME; use quote::quote; use frame_support_procedural_tools::{ @@ -42,7 +45,7 @@ pub struct DeclStorageDef { module_name: syn::Ident, /// Usually `T`. module_runtime_generic: syn::Ident, - /// Usually `Trait` + /// Usually `Config` module_runtime_trait: syn::Path, /// For instantiable module: usually `I: Instance=DefaultInstance`. module_instance: Option, @@ -77,7 +80,7 @@ pub struct DeclStorageDefExt { module_name: syn::Ident, /// Usually `T`. module_runtime_generic: syn::Ident, - /// Usually `Trait`. + /// Usually `Config`. module_runtime_trait: syn::Path, /// For instantiable module: usually `I: Instance=DefaultInstance`. module_instance: Option, @@ -93,7 +96,7 @@ pub struct DeclStorageDefExt { crate_name: syn::Ident, /// Full struct expansion: `Module`. module_struct: proc_macro2::TokenStream, - /// Impl block for module: ``. + /// Impl block for module: ``. module_impl: proc_macro2::TokenStream, /// For instantiable: `I`. optional_instance: Option, @@ -212,7 +215,7 @@ pub struct StorageLineDefExt { storage_struct: proc_macro2::TokenStream, /// If storage is generic over runtime then `T`. optional_storage_runtime_comma: Option, - /// If storage is generic over runtime then `T: Trait`. + /// If storage is generic over runtime then `T: Config`. optional_storage_runtime_bound_comma: Option, /// The where clause to use to constrain generics if storage is generic over runtime. optional_storage_where_clause: Option, @@ -397,6 +400,8 @@ pub fn decl_storage_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStr let def = syn::parse_macro_input!(input as DeclStorageDef); let def_ext = DeclStorageDefExt::from(def); + print_pallet_upgrade::maybe_print_pallet_upgrade(&def_ext); + let hidden_crate_name = def_ext.hidden_crate.as_ref().map(|i| i.to_string()) .unwrap_or_else(|| "decl_storage".to_string()); diff --git a/frame/support/procedural/src/storage/parse.rs b/frame/support/procedural/src/storage/parse.rs index 504af6d0ffcad..2ff7f1fbf38c9 100644 --- a/frame/support/procedural/src/storage/parse.rs +++ b/frame/support/procedural/src/storage/parse.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -47,7 +47,7 @@ mod keyword { pub struct Opt

<P> { pub inner: Option<P>, } -impl syn::export::ToTokens for Opt<P> { +impl quote::ToTokens for Opt<P>
{ fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { if let Some(ref p) = self.inner { p.to_tokens(tokens); diff --git a/frame/support/procedural/src/storage/print_pallet_upgrade.rs b/frame/support/procedural/src/storage/print_pallet_upgrade.rs new file mode 100644 index 0000000000000..447d13898e8df --- /dev/null +++ b/frame/support/procedural/src/storage/print_pallet_upgrade.rs @@ -0,0 +1,383 @@ +use super::StorageLineTypeDef; +use quote::ToTokens; +use frame_support_procedural_tools::clean_type_string; + +/// Environment variable that tells us to print pallet upgrade helper. +const PRINT_PALLET_UPGRADE: &str = "PRINT_PALLET_UPGRADE"; + +fn check_print_pallet_upgrade() -> bool { + std::env::var(PRINT_PALLET_UPGRADE).is_ok() +} + +/// Convert visibilty as now objects are defined in a module. +fn convert_vis(vis: &syn::Visibility) -> &'static str{ + match vis { + syn::Visibility::Inherited => "pub(super)", + syn::Visibility::Public(_) => "pub", + _ => "/* TODO_VISIBILITY */", + } +} + +/// fn to convert to token stream then string using display and then call clean_type_string on it. +fn to_cleaned_string(t: impl quote::ToTokens) -> String { + clean_type_string(&format!("{}", t.into_token_stream())) +} + +/// Print an incomplete upgrade from decl_storage macro to new pallet attribute. +pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { + if !check_print_pallet_upgrade() { + return + } + + let scrate = "e::quote!(frame_support); + + let config_gen = if def.optional_instance.is_some() { + "" + } else { + Default::default() + }; + + let impl_gen = if def.optional_instance.is_some() { + ", I: 'static>" + } else { + "" + }; + + let decl_gen = if def.optional_instance.is_some() { + "" + } else { + "" + }; + + let full_decl_gen = if def.optional_instance.is_some() { + ", I: 'static = ()>" + } else { + "" + }; + + let use_gen = if def.optional_instance.is_some() { + "" + } else { + "" + }; + + let use_gen_tuple = if def.optional_instance.is_some() { + "<(T, I)>" + } else { + "" + }; + + let mut genesis_config = String::new(); + let mut genesis_build = String::new(); + + let genesis_config_builder_def = super::genesis_config::BuilderDef::from_def(scrate, def); + if !genesis_config_builder_def.blocks.is_empty() { + let genesis_config_def = match super::genesis_config::GenesisConfigDef::from_def(def) { + Ok(g) => g, + Err(err) => { + println!("Could not print upgrade due compile error: {:?}", err); + return + }, + }; + + let genesis_config_impl_gen = if genesis_config_def.is_generic { + impl_gen + } else { + Default::default() + }; + + let genesis_config_use_gen = if genesis_config_def.is_generic { + use_gen + } else { + Default::default() + }; + + let genesis_config_decl_gen = if genesis_config_def.is_generic { + if def.optional_instance.is_some() { + ", I: 'static = ()>" + } else { + "" + } + } else { + Default::default() + }; + + let mut genesis_config_decl_fields = String::new(); + let mut genesis_config_default_fields = String::new(); + for field in &genesis_config_def.fields { + genesis_config_decl_fields.push_str(&format!(" + {attrs}pub {name}: {typ},", + attrs = field.attrs.iter() + .fold(String::new(), |res, attr| { + format!("{}#[{}] + ", + res, attr.to_token_stream()) + }), + name = field.name, + typ = to_cleaned_string(&field.typ), + )); + + genesis_config_default_fields.push_str(&format!(" + {name}: {default},", + name = field.name, + default = to_cleaned_string(&field.default), + )); + } + + genesis_config = format!(" + #[pallet::genesis_config] + pub struct 
GenesisConfig{genesis_config_decl_gen} + // TODO_MAYBE_WHERE_CLAUSE + {{{genesis_config_decl_fields} + }} + + #[cfg(feature = \"std\")] + impl{genesis_config_impl_gen} Default for GenesisConfig{genesis_config_use_gen} + // TODO_MAYBE_WHERE_CLAUSE + {{ + fn default() -> Self {{ + Self {{{genesis_config_default_fields} + }} + }} + }}", + genesis_config_decl_gen = genesis_config_decl_gen, + genesis_config_decl_fields = genesis_config_decl_fields, + genesis_config_impl_gen = genesis_config_impl_gen, + genesis_config_default_fields = genesis_config_default_fields, + genesis_config_use_gen = genesis_config_use_gen, + ); + + let genesis_config_build = genesis_config_builder_def.blocks.iter() + .fold(String::new(), |res, block| { + format!("{} + {}", + res, + to_cleaned_string(block), + ) + }); + + genesis_build = format!(" + #[pallet::genesis_build] + impl{impl_gen} GenesisBuild{use_gen} for GenesisConfig{genesis_config_use_gen} + // TODO_MAYBE_WHERE_CLAUSE + {{ + fn build(&self) {{{genesis_config_build} + }} + }}", + impl_gen = impl_gen, + use_gen = use_gen, + genesis_config_use_gen = genesis_config_use_gen, + genesis_config_build = genesis_config_build, + ); + } + + let mut storages = String::new(); + for line in &def.storage_lines { + let storage_vis = convert_vis(&line.visibility); + + let getter = if let Some(getter) = &line.getter { + format!(" + #[pallet::getter(fn {getter})]", + getter = getter + ) + } else { + Default::default() + }; + + let value_type = &line.value_type; + + let default_value_type_value = line.default_value.as_ref() + .map(|default_expr| { + format!(" + #[pallet::type_value] + {storage_vis} fn DefaultFor{name} /* TODO_MAYBE_GENERICS */ () -> {value_type} {{ + {default_expr} + }} +", + name = line.name, + storage_vis = storage_vis, + value_type = to_cleaned_string(&line.value_type), + default_expr = to_cleaned_string(&default_expr), + ) + }) + .unwrap_or_else(String::new); + + let comma_query_kind = if line.is_option { + if line.default_value.is_some() { + ", OptionQuery" + } else { + Default::default() + } + } else { + ", ValueQuery" + }; + + let comma_default_value_getter_name = line.default_value.as_ref() + .map(|_| format!(", DefaultFor{}", line.name)) + .unwrap_or_else(String::new); + + let typ = match &line.storage_type { + StorageLineTypeDef::Map(map) => { + format!("StorageMap<_, {hasher}, {key}, {value_type}{comma_query_kind}\ + {comma_default_value_getter_name}>", + hasher = &map.hasher.to_storage_hasher_struct(), + key = to_cleaned_string(&map.key), + value_type = to_cleaned_string(&value_type), + comma_query_kind = comma_query_kind, + comma_default_value_getter_name = comma_default_value_getter_name, + ) + }, + StorageLineTypeDef::DoubleMap(double_map) => { + format!("StorageDoubleMap<_, {hasher1}, {key1}, {hasher2}, {key2}, {value_type}\ + {comma_query_kind}{comma_default_value_getter_name}>", + hasher1 = double_map.hasher1.to_storage_hasher_struct(), + key1 = to_cleaned_string(&double_map.key1), + hasher2 = double_map.hasher2.to_storage_hasher_struct(), + key2 = to_cleaned_string(&double_map.key2), + value_type = to_cleaned_string(&value_type), + comma_query_kind = comma_query_kind, + comma_default_value_getter_name = comma_default_value_getter_name, + ) + }, + StorageLineTypeDef::Simple(_) => { + format!("StorageValue<_, {value_type}{comma_query_kind}\ + {comma_default_value_getter_name}>", + value_type = to_cleaned_string(&value_type), + comma_query_kind = comma_query_kind, + comma_default_value_getter_name = comma_default_value_getter_name, + ) + }, + }; 
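In practice this printer only runs when the `PRINT_PALLET_UPGRADE` environment variable is set while a `decl_storage!`-based pallet is being compiled. For a hypothetical line such as `Foo get(fn foo): u32;`, the storage fragment it emits would look roughly like:

	#[pallet::storage]
	#[pallet::getter(fn foo)]
	pub(super) type Foo<T> = StorageValue<_, u32, ValueQuery>;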
+ + let additional_comment = if line.is_option && line.default_value.is_some() { + " // TODO: This type of storage is no longer supported: `OptionQuery` cannot be used \ + alongside a not-none value on empty storage. Please use `ValueQuery` instead." + } else { + "" + }; + + storages.push_str(&format!(" +{default_value_type_value}{doc} + #[pallet::storage]{getter} + {storage_vis} type {name}{full_decl_gen} = {typ};{additional_comment}", + default_value_type_value = default_value_type_value, + getter = getter, + storage_vis = storage_vis, + name = line.name, + full_decl_gen = full_decl_gen, + typ = typ, + additional_comment = additional_comment, + doc = line.doc_attrs.iter() + .fold(String::new(), |mut res, attr| { + if let syn::Meta::NameValue(name_value) = attr { + if name_value.path.is_ident("doc") { + if let syn::Lit::Str(string) = &name_value.lit { + res = format!("{} + ///{}", + res, + string.value(), + ); + } + } + } + res + }), + )); + } + + let deprecated_instance_stuff = if def.optional_instance.is_some() { + " + /// Old name for default instance generated by decl_storage. + #[deprecated(note=\"use `()` instead\")] + pub type DefaultInstance = (); + + /// Old name for instance trait used by old macros. + #[deprecated(note=\"use `'static` instead\")] + pub trait Instance: 'static {} + impl Instance for I {}" + } else { + "" + }; + + println!(" +// Template for pallet upgrade for {pallet_name} + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet {{ + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; + + #[pallet::config] + pub trait Config{config_gen}: frame_system::Config + // TODO_MAYBE_ADDITIONAL_BOUNDS_AND_WHERE_CLAUSE + {{ + // TODO_ASSOCIATED_TYPE_AND_CONSTANTS + }} + + {deprecated_instance_stuff} + + #[pallet::pallet] + #[pallet::generate_store({store_vis} trait Store)] + pub struct Pallet{decl_gen}(PhantomData{use_gen_tuple}); + + #[pallet::hooks] + impl{impl_gen} Hooks> for Pallet{use_gen} + // TODO_MAYBE_WHERE_CLAUSE + {{ + // TODO_ON_FINALIZE + // TODO_ON_INITIALIZE + // TODO_ON_RUNTIME_UPGRADE + // TODO_INTEGRITY_TEST + // TODO_OFFCHAIN_WORKER + }} + + #[pallet::call] + impl{impl_gen} Pallet{use_gen} + // TODO_MAYBE_WHERE_CLAUSE + {{ + // TODO_UPGRADE_DISPATCHABLES + }} + + #[pallet::inherent] + // TODO_INHERENT + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + // TODO_EVENT + + // TODO_REMOVE_IF_NO_EVENT + /// Old name generated by `decl_event`. + #[deprecated(note=\"use `Event` instead\")] + pub type RawEvent /* TODO_PUT_EVENT_GENERICS */ = Event /* TODO_PUT_EVENT_GENERICS */; + + #[pallet::error] + // TODO_ERROR + + #[pallet::origin] + // TODO_ORIGIN + + #[pallet::validate_unsigned] + // TODO_VALIDATE_UNSIGNED + + {storages} + + {genesis_config} + + {genesis_build} +}}", + config_gen = config_gen, + store_vis = convert_vis(&def.visibility), + impl_gen = impl_gen, + use_gen = use_gen, + use_gen_tuple = use_gen_tuple, + decl_gen = decl_gen, + storages = storages, + genesis_config = genesis_config, + genesis_build = genesis_build, + pallet_name = def.crate_name, + deprecated_instance_stuff = deprecated_instance_stuff, + ); +} diff --git a/frame/support/procedural/src/storage/storage_struct.rs b/frame/support/procedural/src/storage/storage_struct.rs index e89b06770a6c5..9c049789f9bd9 100644 --- a/frame/support/procedural/src/storage/storage_struct.rs +++ b/frame/support/procedural/src/storage/storage_struct.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/procedural/src/storage/store_trait.rs b/frame/support/procedural/src/storage/store_trait.rs index 7efe65b5f3178..18adadbc61050 100644 --- a/frame/support/procedural/src/storage/store_trait.rs +++ b/frame/support/procedural/src/storage/store_trait.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/procedural/src/transactional.rs b/frame/support/procedural/src/transactional.rs index 3c2617a17e508..6ef26834cf024 100644 --- a/frame/support/procedural/src/transactional.rs +++ b/frame/support/procedural/src/transactional.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,7 @@ use frame_support_procedural_tools::generate_crate_access_2018; pub fn transactional(_attr: TokenStream, input: TokenStream) -> Result { let ItemFn { attrs, vis, sig, block } = syn::parse(input)?; - let crate_ = generate_crate_access_2018()?; + let crate_ = generate_crate_access_2018("frame-support")?; let output = quote! { #(#attrs)* #vis #sig { @@ -45,7 +45,7 @@ pub fn transactional(_attr: TokenStream, input: TokenStream) -> Result Result { let ItemFn { attrs, vis, sig, block } = syn::parse(input)?; - let crate_ = generate_crate_access_2018()?; + let crate_ = generate_crate_access_2018("frame-support")?; let output = quote! 
{ #(#attrs)* #vis #sig { diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index 2cff2473b85d4..4165cb32c3a5b 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural-tools" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,8 +12,8 @@ description = "Proc macro helpers for procedural macros" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-support-procedural-tools-derive = { version = "2.0.0", path = "./derive" } +frame-support-procedural-tools-derive = { version = "3.0.0", path = "./derive" } proc-macro2 = "1.0.6" quote = "1.0.3" -syn = { version = "1.0.7", features = ["full", "visit"] } -proc-macro-crate = "0.1.4" +syn = { version = "1.0.58", features = ["full", "visit"] } +proc-macro-crate = "0.1.5" diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index b616dd790d61e..c377680af16f4 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural-tools-derive" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -17,4 +17,4 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.6" quote = { version = "1.0.3", features = ["proc-macro"] } -syn = { version = "1.0.7", features = ["proc-macro" ,"full", "extra-traits", "parsing"] } +syn = { version = "1.0.58", features = ["proc-macro" ,"full", "extra-traits", "parsing"] } diff --git a/frame/support/procedural/tools/derive/src/lib.rs b/frame/support/procedural/tools/derive/src/lib.rs index 6e5d6c896cbf8..15394e0c559d4 100644 --- a/frame/support/procedural/tools/derive/src/lib.rs +++ b/frame/support/procedural/tools/derive/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/procedural/tools/src/lib.rs b/frame/support/procedural/tools/src/lib.rs index c5a27c809aff8..ce84f69819902 100644 --- a/frame/support/procedural/tools/src/lib.rs +++ b/frame/support/procedural/tools/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -46,17 +46,17 @@ pub fn generate_crate_access(unique_id: &str, def_crate: &str) -> TokenStream { } } -/// Generate the crate access for the `frame-support` crate using 2018 syntax. +/// Generate the crate access for the crate using 2018 syntax. /// -/// Output will for example be `frame_support`. -pub fn generate_crate_access_2018() -> Result { - if std::env::var("CARGO_PKG_NAME").unwrap() == "frame-support" { - Ok(quote::quote!( frame_support )) +/// for `frame-support` output will for example be `frame_support`. 
+pub fn generate_crate_access_2018(def_crate: &str) -> Result { + if std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { + let name = def_crate.to_string().replace("-", "_"); + Ok(syn::Ident::new(&name, Span::call_site())) } else { - match crate_name("frame-support") { + match crate_name(def_crate) { Ok(name) => { - let name = Ident::new(&name, Span::call_site()); - Ok(quote!( #name )) + Ok(Ident::new(&name, Span::call_site())) }, Err(e) => { Err(Error::new(Span::call_site(), &e)) diff --git a/frame/support/procedural/tools/src/syn_ext.rs b/frame/support/procedural/tools/src/syn_ext.rs index 2ba4cf3f28a11..36bd03fed1bef 100644 --- a/frame/support/procedural/tools/src/syn_ext.rs +++ b/frame/support/procedural/tools/src/syn_ext.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/src/debug.rs b/frame/support/src/debug.rs index 04f5c529f0aff..43efd3d91623e 100644 --- a/frame/support/src/debug.rs +++ b/frame/support/src/debug.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -134,7 +134,7 @@ macro_rules! runtime_print { use core::fmt::Write; let mut w = $crate::sp_std::Writer::default(); let _ = core::write!(&mut w, $($arg)+); - sp_io::misc::print_utf8(&w.inner()) + $crate::sp_io::misc::print_utf8(&w.inner()) } } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index d55faa28d115b..7927ccd014bd5 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -72,9 +72,9 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{Trait, ensure_signed}; +/// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// /// // Private functions are dispatchable, but not available to other /// // FRAME pallets. @@ -98,7 +98,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// /// The declaration is set with the header where: /// -/// * `Module`: The struct generated by the macro, with type `Trait`. +/// * `Module`: The struct generated by the macro, with type `Config`. /// * `Call`: The enum generated for every pallet, which implements [`Callable`](./dispatch/trait.Callable.html). /// * `origin`: Alias of `T::Origin`, declared by the [`impl_outer_origin!`](./macro.impl_outer_origin.html) macro. /// * `Result`: The expected return type from pallet functions. 
@@ -114,9 +114,9 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{Trait, ensure_signed}; +/// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// fn my_long_function(origin) -> dispatch::DispatchResult { /// // Your implementation @@ -149,9 +149,9 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}; -/// # use frame_system::{Trait, ensure_signed}; +/// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 1_000_000] /// fn my_long_function(origin, do_expensive_calc: bool) -> DispatchResultWithPostInfo { /// ensure_signed(origin).map_err(|e| e.with_weight(100_000))?; @@ -178,9 +178,9 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::transactional; -/// # use frame_system::Trait; +/// # use frame_system::Config; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// #[transactional] /// fn my_short_function(origin) { @@ -199,9 +199,9 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{Trait, ensure_signed, ensure_root}; +/// # use frame_system::{Config, ensure_signed, ensure_root}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// fn my_privileged_function(origin) -> dispatch::DispatchResult { /// ensure_root(origin)?; @@ -236,10 +236,10 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # pub struct DefaultInstance; /// # pub trait Instance: 'static {} /// # impl Instance for DefaultInstance {} -/// pub trait Trait: frame_system::Trait {} +/// pub trait Config: frame_system::Config {} /// /// decl_module! { -/// pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { +/// pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { /// // Your implementation /// } /// } @@ -261,10 +261,10 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # extern crate frame_support; /// # use frame_support::dispatch; /// # use frame_system::{self as system, ensure_signed}; -/// pub trait Trait: system::Trait where Self::AccountId: From {} +/// pub trait Config: system::Config where Self::AccountId: From {} /// /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { +/// pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { /// // Your implementation /// } /// } @@ -1272,11 +1272,11 @@ macro_rules! 
decl_module { { $( $other_where_bounds:tt )* } fn on_initialize() -> $return:ty { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn on_initialize(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) -> $return { + fn on_initialize(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) -> $return { $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_initialize")); { $( $impl )* } } @@ -1289,8 +1289,8 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn on_initialize($param:ident : $param_ty:ty) -> $return:ty { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn on_initialize($param: $param_ty) -> $return { @@ -1305,8 +1305,8 @@ macro_rules! decl_module { $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* {} }; @@ -1326,10 +1326,10 @@ macro_rules! decl_module { let result: $return = (|| { $( $impl )* })(); $crate::crate_to_pallet_version!() - .put_into_storage::<<$trait_instance as $system::Trait>::PalletInfo, Self>(); + .put_into_storage::<<$trait_instance as $system::Config>::PalletInfo, Self>(); let additional_write = < - <$trait_instance as $system::Trait>::DbWeight as $crate::traits::Get<_> + <$trait_instance as $system::Config>::DbWeight as $crate::traits::Get<_> >::get().writes(1); result.saturating_add(additional_write) @@ -1350,10 +1350,10 @@ macro_rules! decl_module { $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_runtime_upgrade")); $crate::crate_to_pallet_version!() - .put_into_storage::<<$trait_instance as $system::Trait>::PalletInfo, Self>(); + .put_into_storage::<<$trait_instance as $system::Config>::PalletInfo, Self>(); < - <$trait_instance as $system::Trait>::DbWeight as $crate::traits::Get<_> + <$trait_instance as $system::Config>::DbWeight as $crate::traits::Get<_> >::get().writes(1) } } @@ -1394,11 +1394,11 @@ macro_rules! 
decl_module { { $( $other_where_bounds:tt )* } fn on_finalize() { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn on_finalize(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) { + fn on_finalize(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) { $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_finalize")); { $( $impl )* } } @@ -1411,8 +1411,8 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn on_finalize($param:ident : $param_ty:ty) { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn on_finalize($param: $param_ty) { @@ -1427,8 +1427,8 @@ macro_rules! decl_module { $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { } @@ -1440,11 +1440,11 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn offchain_worker() { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn offchain_worker(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) { $( $impl )* } + fn offchain_worker(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) { $( $impl )* } } }; @@ -1454,8 +1454,8 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn offchain_worker($param:ident : $param_ty:ty) { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn offchain_worker($param: $param_ty) { $( $impl )* } @@ -1467,8 +1467,8 @@ macro_rules! 
decl_module { $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* {} }; @@ -1824,7 +1824,7 @@ macro_rules! decl_module { fn storage_version() -> Option<$crate::traits::PalletVersion> { let key = $crate::traits::PalletVersion::storage_key::< - <$trait_instance as $system::Trait>::PalletInfo, Self + <$trait_instance as $system::Config>::PalletInfo, Self >().expect("Every active pallet has a name in the runtime; qed"); $crate::storage::unhashed::get(&key) @@ -1837,7 +1837,7 @@ macro_rules! decl_module { { fn on_genesis() { $crate::crate_to_pallet_version!() - .put_into_storage::<<$trait_instance as $system::Trait>::PalletInfo, Self>(); + .put_into_storage::<<$trait_instance as $system::Config>::PalletInfo, Self>(); } } @@ -2019,7 +2019,7 @@ macro_rules! impl_outer_dispatch { } impl $crate::dispatch::Dispatchable for $call_type { type Origin = $origin; - type Trait = $call_type; + type Config = $call_type; type Info = $crate::weights::DispatchInfo; type PostInfo = $crate::weights::PostDispatchInfo; fn dispatch( @@ -2409,15 +2409,15 @@ mod tests { use crate::weights::{DispatchInfo, DispatchClass, Pays, RuntimeDbWeight}; use crate::traits::{ CallMetadata, GetCallMetadata, GetCallName, OnInitialize, OnFinalize, OnRuntimeUpgrade, - IntegrityTest, Get, + IntegrityTest, Get, PalletInfo, }; - pub trait Trait: system::Trait + Sized where Self::AccountId: From { } + pub trait Config: system::Config + Sized where Self::AccountId: From { } pub mod system { use super::*; - pub trait Trait: 'static { + pub trait Config: 'static { type AccountId; type Call; type BaseCallFilter; @@ -2443,11 +2443,11 @@ mod tests { } } - pub type Origin = RawOrigin<::AccountId>; + pub type Origin = RawOrigin<::AccountId>; } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system = system, T::AccountId: From { + pub struct Module for enum Call where origin: T::Origin, system = system, T::AccountId: From { /// Hi, this is a comment. #[weight = 0] fn aux_0(_origin) -> DispatchResult { unreachable!() } @@ -2548,7 +2548,7 @@ mod tests { ]; pub struct TraitImpl {} - impl Trait for TraitImpl { } + impl Config for TraitImpl { } type Test = Module; @@ -2562,13 +2562,32 @@ mod tests { } } - impl system::Trait for TraitImpl { + impl PalletInfo for TraitImpl { + fn index() -> Option { + let type_id = sp_std::any::TypeId::of::
<P>
(); + if type_id == sp_std::any::TypeId::of::() { + return Some(0) + } + + None + } + fn name() -> Option<&'static str> { + let type_id = sp_std::any::TypeId::of::
<P>
(); + if type_id == sp_std::any::TypeId::of::() { + return Some("Test") + } + + None + } + } + + impl system::Config for TraitImpl { type Origin = OuterOrigin; type AccountId = u32; type Call = OuterCall; type BaseCallFilter = (); type BlockNumber = u32; - type PalletInfo = (); + type PalletInfo = Self; type DbWeight = (); } diff --git a/frame/support/src/error.rs b/frame/support/src/error.rs index c0a886907d0b6..508de49e949c2 100644 --- a/frame/support/src/error.rs +++ b/frame/support/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,7 +39,7 @@ pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; /// # /// decl_error! { /// /// Errors that can occur in my module. -/// pub enum MyError for Module { +/// pub enum MyError for Module { /// /// Hey this is an error message that indicates bla. /// MyCoolErrorMessage, /// /// You are just not cool enough for my module! @@ -47,13 +47,13 @@ pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; /// } /// } /// -/// # use frame_system::Trait; +/// # use frame_system::Config; /// /// // You need to register the error type in `decl_module!` as well to make the error /// // exported in the metadata. /// /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// type Error = MyError; /// /// #[weight = 0] diff --git a/frame/support/src/event.rs b/frame/support/src/event.rs index 3538748c30faf..eb666b6f028ab 100644 --- a/frame/support/src/event.rs +++ b/frame/support/src/event.rs @@ -1,15 +1,19 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Macros that define an Event types. Events can be used to easily report changes or conditions //! in your runtime to external entities like users, chain explorers, or dApps. 
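Editor's note: before the `decl_event!` documentation hunks below, a small sketch of what the macro looks like after the rename. The `Config` trait and its `Balance` associated type are illustrative and mirror the doc examples that follow; this is not code from the diff.

```rust
// Illustrative trait supplying the generic parameter used by the event.
trait Config {
    type Balance;
}

frame_support::decl_event!(
    pub enum Event<T> where Balance = <T as Config>::Balance {
        /// A value of the configured `Balance` type was reported.
        Message(Balance),
    }
);
```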
@@ -37,7 +41,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// # Generic Event Example: /// /// ```rust -/// trait Trait { +/// trait Config { /// type Balance; /// type Token; /// } @@ -45,7 +49,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// mod event1 { /// // Event that specifies the generic parameter explicitly (`Balance`). /// frame_support::decl_event!( -/// pub enum Event where Balance = ::Balance { +/// pub enum Event where Balance = ::Balance { /// Message(Balance), /// } /// ); @@ -56,7 +60,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// // If no name for the generic parameter is specified explicitly, /// // the name will be taken from the type name of the trait. /// frame_support::decl_event!( -/// pub enum Event where ::Balance { +/// pub enum Event where ::Balance { /// Message(Balance), /// } /// ); @@ -65,7 +69,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// mod event3 { /// // And we even support declaring multiple generic parameters! /// frame_support::decl_event!( -/// pub enum Event where ::Balance, ::Token { +/// pub enum Event where ::Balance, ::Token { /// Message(Balance, Token), /// } /// ); @@ -82,7 +86,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE ///# struct DefaultInstance; ///# trait Instance {} ///# impl Instance for DefaultInstance {} -/// trait Trait { +/// trait Config { /// type Balance; /// type Token; /// } @@ -90,8 +94,8 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// // For module with instances, DefaultInstance is optional /// frame_support::decl_event!( /// pub enum Event where -/// ::Balance, -/// ::Token +/// ::Balance, +/// ::Token /// { /// Message(Balance, Token), /// } @@ -258,10 +262,10 @@ macro_rules! __decl_generic_event { { $( $events:tt )* }; { ,$( $generic_param:ident = $generic_type:ty ),* }; ) => { - /// [`RawEvent`] specialized for the configuration [`Trait`] + /// [`RawEvent`] specialized for the configuration [`Config`] /// /// [`RawEvent`]: enum.RawEvent.html - /// [`Trait`]: trait.Trait.html + /// [`Config`]: trait.Config.html pub type Event<$event_generic_param $(, $instance $( = $event_default_instance)? )?> = RawEvent<$( $generic_type ),* $(, $instance)? >; #[derive( @@ -551,7 +555,7 @@ mod tests { use codec::{Encode, Decode}; mod system { - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; @@ -559,7 +563,7 @@ mod tests { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } decl_event!( @@ -570,7 +574,7 @@ mod tests { } mod system_renamed { - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; @@ -578,7 +582,7 @@ mod tests { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } decl_event!( @@ -591,17 +595,17 @@ mod tests { mod event_module { use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } decl_event!( /// Event without renaming the generic parameter `Balance` and `Origin`. - pub enum Event where ::Balance, ::Origin + pub enum Event where ::Balance, ::Origin { /// Hi, I am a comment. TestEvent(Balance, Origin), @@ -614,19 +618,19 @@ mod tests { mod event_module2 { use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } decl_event!( /// Event with renamed generic parameter pub enum Event where - BalanceRenamed = ::Balance, - OriginRenamed = ::Origin + BalanceRenamed = ::Balance, + OriginRenamed = ::Origin { TestEvent(BalanceRenamed), TestOrigin(OriginRenamed), @@ -645,19 +649,19 @@ mod tests { mod event_module4 { use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } decl_event!( /// Event finish formatting on an unnamed one with trailing comma pub enum Event where - ::Balance, - ::Origin, + ::Balance, + ::Origin, { TestEvent(Balance, Origin), } @@ -667,19 +671,19 @@ mod tests { mod event_module5 { use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } decl_event!( /// Event finish formatting on an named one with trailing comma pub enum Event where - BalanceRenamed = ::Balance, - OriginRenamed = ::Origin, + BalanceRenamed = ::Balance, + OriginRenamed = ::Origin, { TestEvent(BalanceRenamed, OriginRenamed), TrailingCommaInArgs( @@ -709,45 +713,45 @@ mod tests { pub enum TestEventSystemRenamed for TestRuntime2 { system_renamed, event_module, - #[codec(index = "5")] event_module2, + #[codec(index = 5)] event_module2, event_module3, } } - impl event_module::Trait for TestRuntime { + impl event_module::Config for TestRuntime { type Balance = u32; } - impl event_module2::Trait for TestRuntime { + impl event_module2::Config for TestRuntime { type Balance = u32; } - impl system::Trait for TestRuntime { + impl system::Config for TestRuntime { type Origin = u32; type BlockNumber = u32; - type PalletInfo = (); + type PalletInfo = crate::tests::PanicPalletInfo; type DbWeight = (); } - impl event_module::Trait for TestRuntime2 { + impl event_module::Config for TestRuntime2 { type Balance = u32; } - impl event_module2::Trait for TestRuntime2 { + impl event_module2::Config for TestRuntime2 { type Balance = u32; } - impl system_renamed::Trait for TestRuntime2 { + impl system_renamed::Config for TestRuntime2 { type Origin = u32; type BlockNumber = u32; - type PalletInfo = (); + type PalletInfo = crate::tests::PanicPalletInfo; type DbWeight = (); } - impl system::Trait for TestRuntime2 { + impl system::Config for TestRuntime2 { type Origin = u32; type BlockNumber = u32; - type PalletInfo = (); + type PalletInfo = crate::tests::PanicPalletInfo; type DbWeight = (); } diff --git a/frame/support/src/genesis_config.rs b/frame/support/src/genesis_config.rs index 99f8ad886dd22..2b7cae898ff5b 100644 
--- a/frame/support/src/genesis_config.rs +++ b/frame/support/src/genesis_config.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/src/hash.rs b/frame/support/src/hash.rs index a5de205863d5c..0a8be8aec035a 100644 --- a/frame/support/src/hash.rs +++ b/frame/support/src/hash.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -56,6 +56,7 @@ impl Hashable for T { /// Hasher to use to hash keys to insert to storage. pub trait StorageHasher: 'static { + const METADATA: frame_metadata::StorageHasher; type Output: AsRef<[u8]>; fn hash(x: &[u8]) -> Self::Output; } @@ -73,6 +74,7 @@ pub trait ReversibleStorageHasher: StorageHasher { /// Store the key directly. pub struct Identity; impl StorageHasher for Identity { + const METADATA: frame_metadata::StorageHasher = frame_metadata::StorageHasher::Identity; type Output = Vec; fn hash(x: &[u8]) -> Vec { x.to_vec() @@ -87,6 +89,7 @@ impl ReversibleStorageHasher for Identity { /// Hash storage keys with `concat(twox64(key), key)` pub struct Twox64Concat; impl StorageHasher for Twox64Concat { + const METADATA: frame_metadata::StorageHasher = frame_metadata::StorageHasher::Twox64Concat; type Output = Vec; fn hash(x: &[u8]) -> Vec { twox_64(x) @@ -109,6 +112,7 @@ impl ReversibleStorageHasher for Twox64Concat { /// Hash storage keys with `concat(blake2_128(key), key)` pub struct Blake2_128Concat; impl StorageHasher for Blake2_128Concat { + const METADATA: frame_metadata::StorageHasher = frame_metadata::StorageHasher::Blake2_128Concat; type Output = Vec; fn hash(x: &[u8]) -> Vec { blake2_128(x) @@ -131,6 +135,7 @@ impl ReversibleStorageHasher for Blake2_128Concat { /// Hash storage keys with blake2 128 pub struct Blake2_128; impl StorageHasher for Blake2_128 { + const METADATA: frame_metadata::StorageHasher = frame_metadata::StorageHasher::Blake2_128; type Output = [u8; 16]; fn hash(x: &[u8]) -> [u8; 16] { blake2_128(x) @@ -140,6 +145,7 @@ impl StorageHasher for Blake2_128 { /// Hash storage keys with blake2 256 pub struct Blake2_256; impl StorageHasher for Blake2_256 { + const METADATA: frame_metadata::StorageHasher = frame_metadata::StorageHasher::Blake2_256; type Output = [u8; 32]; fn hash(x: &[u8]) -> [u8; 32] { blake2_256(x) @@ -149,6 +155,7 @@ impl StorageHasher for Blake2_256 { /// Hash storage keys with twox 128 pub struct Twox128; impl StorageHasher for Twox128 { + const METADATA: frame_metadata::StorageHasher = frame_metadata::StorageHasher::Twox128; type Output = [u8; 16]; fn hash(x: &[u8]) -> [u8; 16] { twox_128(x) @@ -158,6 +165,7 @@ impl StorageHasher for Twox128 { /// Hash storage keys with twox 256 pub struct Twox256; impl StorageHasher for Twox256 { + const METADATA: frame_metadata::StorageHasher = frame_metadata::StorageHasher::Twox256; type Output = [u8; 32]; fn hash(x: &[u8]) -> [u8; 32] { twox_256(x) diff --git a/frame/support/src/inherent.rs b/frame/support/src/inherent.rs index 83a1872ab4f3d..3c201dff29c22 100644 --- a/frame/support/src/inherent.rs +++ b/frame/support/src/inherent.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -75,6 +75,7 @@ macro_rules! impl_outer_inherent { fn check_extrinsics(&self, block: &$block) -> $crate::inherent::CheckInherentsResult { use $crate::inherent::{ProvideInherent, IsFatalError}; use $crate::traits::IsSubType; + use $crate::sp_runtime::traits::Block as _; let mut result = $crate::inherent::CheckInherentsResult::new(); for xt in block.extrinsics() { @@ -217,6 +218,10 @@ mod tests { fn create_inherent(_: &InherentData) -> Option { Some(CallTest2::Something) } + + fn is_inherent_required(_: &InherentData) -> Result, Self::Error> { + Ok(Some(().into())) + } } type Block = testing::Block; @@ -259,14 +264,30 @@ mod tests { fn check_inherents_works() { let block = Block::new( Header::new_from_number(1), - vec![Extrinsic { function: Call::Test(CallTest::Something) }], + vec![ + Extrinsic { function: Call::Test2(CallTest2::Something) }, + Extrinsic { function: Call::Test(CallTest::Something) }, + ], ); assert!(InherentData::new().check_extrinsics(&block).ok()); let block = Block::new( Header::new_from_number(1), - vec![Extrinsic { function: Call::Test(CallTest::SomethingElse) }], + vec![ + Extrinsic { function: Call::Test2(CallTest2::Something) }, + Extrinsic { function: Call::Test(CallTest::SomethingElse) }, + ], + ); + + assert!(InherentData::new().check_extrinsics(&block).fatal_error()); + } + + #[test] + fn required_inherents_enforced() { + let block = Block::new( + Header::new_from_number(1), + vec![Extrinsic { function: Call::Test(CallTest::Something) }], ); assert!(InherentData::new().check_extrinsics(&block).fatal_error()); diff --git a/frame/support/src/instances.rs b/frame/support/src/instances.rs new file mode 100644 index 0000000000000..086ed9a6cc175 --- /dev/null +++ b/frame/support/src/instances.rs @@ -0,0 +1,96 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Some instance placeholder to be used in [`frame_support::pallet`] attribute macro. +//! +//! [`frame_support::pallet`] attribute macro does only requires the instance generic `I` to be +//! static (contrary to `decl_*` macro which requires instance generic to implement +//! [`frame_support::traits::Instance`]). +//! +//! Thus support provides some instance types to be used, This allow some instantiable pallet to +//! depend on specific instance of another: +//! ``` +//! # mod another_pallet { pub trait Config {} } +//! pub trait Config: another_pallet::Config {} +//! ``` +//! +//! NOTE: [`frame_support::pallet`] will reexport them inside the module, in order to make them +//! accessible to [`frame_support::construct_runtime`]. + +/// Instance0 to be used for instantiable pallet define with `pallet` macro. 
+#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance0; + +/// Instance1 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance1; + +/// Instance2 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance2; + +/// Instance3 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance3; + +/// Instance4 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance4; + +/// Instance5 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance5; + +/// Instance6 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance6; + +/// Instance7 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance7; + +/// Instance8 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance8; + +/// Instance9 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance9; + +/// Instance10 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance10; + +/// Instance11 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance11; + +/// Instance12 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance12; + +/// Instance13 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance13; + +/// Instance14 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance14; + +/// Instance15 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance15; diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index a132b787fd9bf..fc7939fe3010b 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -67,6 +67,7 @@ pub mod unsigned; pub mod error; pub mod traits; pub mod weights; +pub mod instances; pub use self::hash::{ Twox256, Twox128, Blake2_256, Blake2_128, Identity, Twox64Concat, Blake2_128Concat, Hashable, @@ -80,26 +81,31 @@ pub use self::dispatch::{Parameter, Callable}; pub use sp_runtime::{self, ConsensusEngineId, print, traits::Printable}; /// A type that cannot be instantiated. 
-#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum Never {} /// Create new implementations of the [`Get`](crate::traits::Get) trait. /// -/// The so-called parameter type can be created in three different ways: +/// The so-called parameter type can be created in four different ways: /// -/// - Using `const` to create a parameter type that provides a `const` getter. -/// It is required that the `value` is const. +/// - Using `const` to create a parameter type that provides a `const` getter. It is required that +/// the `value` is const. /// /// - Declare the parameter type without `const` to have more freedom when creating the value. /// -/// - Using `storage` to create a storage parameter type. This type is special as it tries to -/// load the value from the storage under a fixed key. If the value could not be found in the -/// storage, the given default value will be returned. It is required that the value implements -/// [`Encode`](codec::Encode) and [`Decode`](codec::Decode). The key for looking up the value -/// in the storage is built using the following formular: +/// - Using `storage` to create a storage parameter type. This type is special as it tries to load +/// the value from the storage under a fixed key. If the value could not be found in the storage, +/// the given default value will be returned. It is required that the value implements +/// [`Encode`](codec::Encode) and [`Decode`](codec::Decode). The key for looking up the value in +/// the storage is built using the following formula: /// /// `twox_128(":" ++ NAME ++ ":")` where `NAME` is the name that is passed as type name. /// +/// - Using `static` to create a static parameter type. Its value is +/// being provided by a static variable with the equivalent name in `UPPER_SNAKE_CASE`. An +/// additional `set` function is provided in this case to alter the static variable. +/// **This is intended for testing ONLY and is ONLY available when `std` is enabled.** +/// /// # Examples /// /// ``` @@ -114,12 +120,14 @@ pub enum Never {} /// /// Visibility of the type is optional /// OtherArgument: u64 = non_const_expression(); /// pub storage StorageArgument: u64 = 5; +/// pub static StaticArgument: u32 = 7; /// } /// /// trait Config { /// type Parameter: Get; /// type OtherParameter: Get; /// type StorageParameter: Get; +/// type StaticParameter: Get; /// } /// /// struct Runtime; @@ -127,7 +135,10 @@ pub enum Never {} /// type Parameter = Argument; /// type OtherParameter = OtherArgument; /// type StorageParameter = StorageArgument; +/// type StaticParameter = StaticArgument; /// } +/// +/// // In testing, `StaticArgument` can be altered later: `StaticArgument::set(8)`. /// ``` /// /// # Invalid example: @@ -142,7 +153,6 @@ pub enum Never {} /// pub const Argument: u64 = non_const_expression(); /// } /// ``` - #[macro_export] macro_rules! parameter_types { ( @@ -235,7 +245,69 @@ macro_rules! parameter_types { I::from(Self::get()) } } - } + }; + ( + $( + $( #[ $attr:meta ] )* + $vis:vis static $name:ident: $type:ty = $value:expr; + )* + ) => ( + $crate::parameter_types_impl_thread_local!( + $( + $( #[ $attr ] )* + $vis static $name: $type = $value; + )* + ); + ); +} + +#[cfg(not(feature = "std"))] +#[macro_export] +macro_rules! parameter_types_impl_thread_local { + ( $( $any:tt )* ) => { + compile_error!("static parameter types is only available in std and for testing."); + }; +} + +#[cfg(feature = "std")] +#[macro_export] +macro_rules! 
parameter_types_impl_thread_local { + ( + $( + $( #[ $attr:meta ] )* + $vis:vis static $name:ident: $type:ty = $value:expr; + )* + ) => { + $crate::parameter_types_impl_thread_local!( + IMPL_THREAD_LOCAL $( $vis, $name, $type, $value, )* + ); + $crate::paste::item! { + $crate::parameter_types!( + $( + $( #[ $attr ] )* + $vis $name: $type = [<$name:snake:upper>].with(|v| v.borrow().clone()); + )* + ); + $( + impl $name { + /// Set the internal value. + pub fn set(t: $type) { + [<$name:snake:upper>].with(|v| *v.borrow_mut() = t); + } + } + )* + } + }; + (IMPL_THREAD_LOCAL $( $vis:vis, $name:ident, $type:ty, $value:expr, )* ) => { + $crate::paste::item! { + thread_local! { + $( + pub static [<$name:snake:upper>]: std::cell::RefCell<$type> = + std::cell::RefCell::new($value); + )* + } + } + }; } /// Macro for easily creating a new implementation of both the `Get` and `Contains` traits. Use @@ -275,14 +347,14 @@ pub use frame_support_procedural::{ /// This is useful for type generic over runtime: /// ``` /// # use frame_support::CloneNoBound; -/// trait Trait { +/// trait Config { /// type C: Clone; /// } /// /// // Foo implements [`Clone`] because `C` bounds [`Clone`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Clone`]. /// #[derive(CloneNoBound)] -/// struct Foo { +/// struct Foo { /// c: T::C, /// } /// ``` @@ -293,14 +365,14 @@ pub use frame_support_procedural::CloneNoBound; /// This is useful for type generic over runtime: /// ``` /// # use frame_support::{EqNoBound, PartialEqNoBound}; -/// trait Trait { +/// trait Config { /// type C: Eq; /// } /// /// // Foo implements [`Eq`] because `C` bounds [`Eq`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Eq`]. /// #[derive(PartialEqNoBound, EqNoBound)] -/// struct Foo { +/// struct Foo { /// c: T::C, /// } /// ``` @@ -311,14 +383,14 @@ pub use frame_support_procedural::EqNoBound; /// This is useful for type generic over runtime: /// ``` /// # use frame_support::PartialEqNoBound; -/// trait Trait { +/// trait Config { /// type C: PartialEq; /// } /// /// // Foo implements [`PartialEq`] because `C` bounds [`PartialEq`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`PartialEq`]. /// #[derive(PartialEqNoBound)] -/// struct Foo { +/// struct Foo { /// c: T::C, /// } /// ``` @@ -330,14 +402,14 @@ pub use frame_support_procedural::PartialEqNoBound; /// ``` /// # use frame_support::DebugNoBound; /// # use core::fmt::Debug; -/// trait Trait { +/// trait Config { /// type C: Debug; /// } /// /// // Foo implements [`Debug`] because `C` bounds [`Debug`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Debug`]. /// #[derive(DebugNoBound)] -/// struct Foo { +/// struct Foo { /// c: T::C, /// } /// ``` @@ -416,7 +488,6 @@ macro_rules! ensure { /// /// Used as `assert_noop(expression_to_assert, expected_error_expression)`. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_noop { ( $x:expr, @@ -428,11 +499,25 @@ macro_rules! assert_noop { } } +/// Evaluate any expression and assert that runtime storage has not been mutated +/// (i.e. expression is a storage no-operation). +/// +/// Used as `assert_storage_noop(expression_to_assert)`. +#[macro_export] +macro_rules! assert_storage_noop { + ( + $x:expr + ) => { + let h = $crate::storage_root(); + $x; + assert_eq!(h, $crate::storage_root()); + } +} + /// Assert an expression returns an error specified. 
/// /// Used as `assert_err!(expression_to_assert, expected_error_expression)` #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_err { ( $x:expr , $y:expr $(,)? ) => { assert_eq!($x, Err($y.into())); @@ -444,7 +529,6 @@ macro_rules! assert_err { /// This can be used on`DispatchResultWithPostInfo` when the post info should /// be ignored. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_err_ignore_postinfo { ( $x:expr , $y:expr $(,)? ) => { $crate::assert_err!($x.map(|_| ()).map_err(|e| e.error), $y); @@ -453,7 +537,6 @@ macro_rules! assert_err_ignore_postinfo { /// Assert an expression returns error with the given weight. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_err_with_weight { ($call:expr, $err:expr, $weight:expr $(,)? ) => { if let Err(dispatch_err_with_post) = $call { @@ -470,7 +553,6 @@ macro_rules! assert_err_with_weight { /// Used as `assert_ok!(expression_to_assert, expected_ok_expression)`, /// or `assert_ok!(expression_to_assert)` which would assert against `Ok(())`. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_ok { ( $x:expr $(,)? ) => { let is = $x; @@ -489,7 +571,7 @@ macro_rules! assert_ok { pub use serde::{Serialize, Deserialize}; #[cfg(test)] -mod tests { +pub mod tests { use super::*; use codec::{Codec, EncodeLike}; use frame_metadata::{ @@ -499,7 +581,19 @@ mod tests { use sp_std::{marker::PhantomData, result}; use sp_io::TestExternalities; - pub trait Trait: 'static { + /// A PalletInfo implementation which just panics. + pub struct PanicPalletInfo; + + impl crate::traits::PalletInfo for PanicPalletInfo { + fn index() -> Option { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } + fn name() -> Option<&'static str> { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } + } + + pub trait Config: 'static { type BlockNumber: Codec + EncodeLike + Default; type Origin; type PalletInfo: crate::traits::PalletInfo; @@ -509,16 +603,16 @@ mod tests { mod module { #![allow(dead_code)] - use super::Trait; + use super::Config; decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } } use self::module::Module; decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { pub Data get(fn data) build(|_| vec![(15u32, 42u64)]): map hasher(twox_64_concat) u32 => u64; pub OptionLinkedMap: map hasher(blake2_128_concat) u32 => Option; @@ -540,10 +634,10 @@ mod tests { } struct Test; - impl Trait for Test { + impl Config for Test { type BlockNumber = u32; type Origin = u32; - type PalletInfo = (); + type PalletInfo = PanicPalletInfo; type DbWeight = (); } @@ -946,3 +1040,1023 @@ mod tests { }) } } + +/// Prelude to be used alongside pallet macro, for ease of use. 
+pub mod pallet_prelude { + pub use sp_std::marker::PhantomData; + #[cfg(feature = "std")] + pub use frame_support::traits::GenesisBuild; + pub use frame_support::{ + EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, DebugNoBound, CloneNoBound, Twox256, + Twox128, Blake2_256, Blake2_128, Identity, Twox64Concat, Blake2_128Concat, debug, ensure, + RuntimeDebug, storage, + traits::{Get, Hooks, IsType, GetPalletVersion, EnsureOrigin}, + dispatch::{DispatchResultWithPostInfo, Parameter, DispatchError}, + weights::{DispatchClass, Pays, Weight}, + storage::types::{StorageValue, StorageMap, StorageDoubleMap, ValueQuery, OptionQuery}, + }; + pub use codec::{Encode, Decode}; + pub use sp_inherents::{InherentData, InherentIdentifier, ProvideInherent}; + pub use sp_runtime::{ + traits::{MaybeSerializeDeserialize, Member, ValidateUnsigned}, + transaction_validity::{ + TransactionSource, TransactionValidity, ValidTransaction, TransactionPriority, + TransactionTag, TransactionLongevity, TransactionValidityError, InvalidTransaction, + UnknownTransaction, + }, + }; +} + +/// `pallet` attribute macro allows to define a pallet to be used in `construct_runtime!`. +/// +/// It is define by a module item: +/// ```ignore +/// #[pallet] +/// pub mod pallet { +/// ... +/// } +/// ``` +/// +/// Inside the module the macro will parse item with the attribute: `#[pallet::*]`, some attributes +/// are mandatory, some other optional. +/// +/// The attribute are explained with the syntax of non instantiable pallets, to see how pallet with +/// instance work see below example. +/// +/// Note various type can be automatically imported using pallet_prelude in frame_support and +/// frame_system: +/// ```ignore +/// #[pallet] +/// pub mod pallet { +/// use frame_support::pallet_prelude::*; +/// use frame_system::pallet_prelude::*; +/// ... +/// } +/// ``` +/// +/// # Config trait: `#[pallet::config]` mandatory +/// +/// The trait defining generics of the pallet. +/// +/// Item must be defined as +/// ```ignore +/// #[pallet::config] +/// pub trait Config: frame_system::Config + $optionally_some_other_supertraits +/// $optional_where_clause +/// { +/// ... +/// } +/// ``` +/// I.e. a regular trait definition named `Config`, with supertrait `frame_system::Config`, +/// optionally other supertrait and where clause. +/// +/// The associated type `Event` is reserved, if defined it must bounds `From` and +/// `IsType<::Event>`, see `#[pallet::event]` for more information. +/// +/// To put `Get` associated type into metadatas, use the attribute `#[pallet::constant]`, e.g.: +/// ```ignore +/// #[pallet::config] +/// pub trait Config: frame_system::Config { +/// #[pallet::constant] +/// type Foo: Get; +/// } +/// ``` +/// +/// To bypass the `frame_system::Config` supertrait check, use the attribute +/// `#[pallet::disable_frame_system_supertrait_check]`, e.g.: +/// ```ignore +/// #[pallet::config] +/// #[pallet::disable_frame_system_supertrait_check] +/// pub trait Config: pallet_timestamp::Config {} +/// ``` +/// +/// ### Macro expansion: +/// +/// The macro expand pallet constant metadata with the information given by `#[pallet::constant]`. +/// +/// # Pallet struct placeholder: `#[pallet::pallet]` mandatory +/// +/// The placeholder struct, on which is implemented pallet informations. +/// +/// Item must be defined as followed: +/// ```ignore +/// #[pallet::pallet] +/// pub struct Pallet(_); +/// ``` +/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. 
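Editor's note: taken together with the `#[pallet::hooks]` and `#[pallet::call]` items described below, the two mandatory attributes introduced so far give a skeleton roughly like the following. This is a sketch only; the bounds are the ones documented here and everything else is intentionally left empty.

```rust
#[frame_support::pallet]
pub mod pallet {
    use frame_support::pallet_prelude::*;
    use frame_system::pallet_prelude::*;

    // Mandatory: the pallet's configuration trait.
    #[pallet::config]
    pub trait Config: frame_system::Config {}

    // Mandatory: the pallet struct placeholder; `_` is replaced by the macro.
    #[pallet::pallet]
    pub struct Pallet<T>(_);

    // Mandatory: hooks implementation (empty here).
    #[pallet::hooks]
    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}

    // Mandatory: dispatchables (none declared in this skeleton).
    #[pallet::call]
    impl<T: Config> Pallet<T> {}
}
```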
+/// +/// To generate a `Store` trait associating all storages, use the attribute +/// `#[pallet::generate_store($vis trait Store)]`, e.g.: +/// ```ignore +/// #[pallet::pallet] +/// #[pallet::generate_store(pub(super) trait Store)] +/// pub struct Pallet(_); +/// ``` +/// More precisely the store trait contains an associated type for each storage. It is implemented +/// for `Pallet` allowing to access the storage from pallet struct. +/// +/// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using +/// `::Foo`. +/// +/// ### Macro expansion: +/// +/// The macro add this attribute to the struct definition: +/// ```ignore +/// #[derive( +/// frame_support::CloneNoBound, +/// frame_support::EqNoBound, +/// frame_support::PartialEqNoBound, +/// frame_support::RuntimeDebugNoBound, +/// )] +/// ``` +/// and replace the type `_` by `PhantomData`. +/// +/// It implements on pallet: +/// * [`traits::GetPalletVersion`] +/// * [`traits::OnGenesis`]: contains some logic to write pallet version into storage. +/// * `ModuleErrorMetadata`: using error declared or no metadata. +/// +/// It declare `type Module` type alias for `Pallet`, used by [`construct_runtime`]. +/// +/// If attribute generate_store then macro create the trait `Store` and implement it on `Pallet`. +/// +/// # Hooks: `#[pallet::hooks]` mandatory +/// +/// Implementation of `Hooks` on `Pallet` allowing to define some specific pallet logic. +/// +/// Item must be defined as +/// ```ignore +/// #[pallet::hooks] +/// impl Hooks> for Pallet $optional_where_clause { +/// } +/// ``` +/// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait +/// `Hooks>` (they are defined in preludes), for the type `Pallet` +/// and with an optional where clause. +/// +/// ### Macro expansion: +/// +/// The macro implements the traits `OnInitialize`, `OnFinalize`, `OnRuntimeUpgrade`, +/// `OffchainWorker`, `IntegrityTest` using `Hooks` implementation. +/// +/// NOTE: OnRuntimeUpgrade is implemented with `Hooks::on_runtime_upgrade` and some additional +/// logic. E.g. logic to write pallet version into storage. +/// +/// # Call: `#[pallet::call]` mandatory +/// +/// Implementation of pallet dispatchables. +/// +/// Item must be defined as: +/// ```ignore +/// #[pallet::call] +/// impl Pallet { +/// /// $some_doc +/// #[pallet::weight($ExpressionResultingInWeight)] +/// $vis fn $fn_name( +/// origin: OriginFor, +/// $some_arg: $some_type, +/// // or with compact attribute: #[pallet::compact] $some_arg: $some_type, +/// ... +/// ) -> DispatchResultWithPostInfo { +/// ... +/// } +/// ... +/// } +/// ``` +/// I.e. a regular type implementation, with generic `T: Config`, on type `Pallet`, with +/// optional where clause. +/// +/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, +/// the first argument must be `origin: OriginFor`, compact encoding for argument can be used +/// using `#[pallet::compact]`, function must return DispatchResultWithPostInfo. +/// +/// All arguments must implement `Debug`, `PartialEq`, `Eq`, `Decode`, `Encode`, `Clone`. For ease +/// of use, bound the trait `Member` available in frame_support::pallet_prelude. +/// +/// **WARNING**: modifying dispatchables, changing their order, removing some must be done with +/// care. Indeed this will change the outer runtime call type (which is an enum with one variant +/// per pallet), this outer runtime call can be stored on-chain (e.g. in pallet-scheduler). +/// Thus migration might be needed. 
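Editor's note: as a concrete, illustrative instance of the rules above, the block below sketches a single dispatchable inside the `#[pallet::call]` impl of the skeleton shown earlier. The `set_value` name, its weight, and the `u32` argument are placeholders chosen for the example.

```rust
#[pallet::call]
impl<T: Config> Pallet<T> {
    /// Store a caller-supplied value (illustrative only).
    #[pallet::weight(10_000)]
    fn set_value(
        origin: OriginFor<T>,
        #[pallet::compact] value: u32,
    ) -> DispatchResultWithPostInfo {
        // The first argument must be the origin; here we only require a
        // signed caller and otherwise ignore `value`.
        let _who = frame_system::ensure_signed(origin)?;
        let _ = value;
        Ok(().into())
    }
}
```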
+/// +/// ### Macro expansion +/// +/// The macro create an enum `Call` with one variant per dispatchable. This enum implements: +/// `Clone`, `Eq`, `PartialEq`, `Debug` (with stripped implementation in `not("std")`), `Encode`, +/// `Decode`, `GetDispatchInfo`, `GetCallName`, `UnfilteredDispatchable`. +/// +/// The macro implement on `Pallet`, the `Callable` trait and a function `call_functions` which +/// returns the dispatchable metadatas. +/// +/// # Extra constants: `#[pallet::extra_constants]` optional +/// +/// Allow to define some extra constants to put into constant metadata. +/// +/// Item must be defined as: +/// ```ignore +/// #[pallet::extra_constants] +/// impl Pallet where $optional_where_clause { +/// /// $some_doc +/// $vis fn $fn_name() -> $some_return_type { +/// ... +/// } +/// ... +/// } +/// ``` +/// I.e. a regular rust implement block with some optional where clause and functions with 0 args, +/// 0 generics, and some return type. +/// +/// ### Macro expansion +/// +/// The macro add some extra constant to pallet constant metadata. +/// +/// # Error: `#[pallet::error]` optional +/// +/// Allow to define an error type to be return from dispatchable on error. +/// This error type informations are put into metadata. +/// +/// Item must be defined as: +/// ```ignore +/// #[pallet::error] +/// pub enum Error { +/// /// $some_optional_doc +/// $SomeFieldLessVariant, +/// ... +/// } +/// ``` +/// I.e. a regular rust enum named `Error`, with generic `T` and fieldless variants. +/// The generic `T` mustn't bound anything and where clause is not allowed. But bounds and where +/// clause shouldn't be needed for any usecase. +/// +/// ### Macro expansion +/// +/// The macro implements `Debug` trait and functions `as_u8` using variant position, and `as_str` +/// using variant doc. +/// +/// The macro implements `From>` for `&'static str`. +/// The macro implements `From>` for `DispatchError`. +/// +/// The macro implements `ModuleErrorMetadata` on `Pallet` defining the `ErrorMetadata` of the +/// pallet. +/// +/// # Event: `#[pallet::event]` optional +/// +/// Allow to define pallet events, pallet events are stored in the block when they deposited (and +/// removed in next block). +/// +/// Item is defined as: +/// ```ignore +/// #[pallet::event] +/// #[pallet::metadata($SomeType = "$Metadata", $SomeOtherType = "$Metadata", ..)] // Optional +/// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional +/// pub enum Event<$some_generic> $optional_where_clause { +/// /// Some doc +/// $SomeName($SomeType, $YetanotherType, ...), +/// ... +/// } +/// ``` +/// I.e. an enum (with named or unnamed fields variant), named Event, with generic: none or `T` or +/// `T: Config`, and optional where clause. +/// +/// Each field must implement `Clone`, `Eq`, `PartialEq`, `Encode`, `Decode`, and `Debug` (on std +/// only). +/// For ease of use, bound the trait `Member` available in frame_support::pallet_prelude. +/// +/// Variant documentations and field types are put into metadata. +/// The attribute `#[pallet::metadata(..)]` allows to specify the metadata to put for some types. +/// +/// The metadata of a type is defined by: +/// * if matching a type in `#[pallet::metadata(..)]`, then the corresponding metadata. +/// * otherwise the type stringified. 
+/// +/// E.g.: +/// ```ignore +/// #[pallet::event] +/// #[pallet::metadata(u32 = "SpecialU32")] +/// pub enum Event { +/// Proposed(u32, T::AccountId), +/// } +/// ``` +/// will write in event variant metadata `"SpecialU32"` and `"T::AccountId"`. +/// +/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generate a helper +/// function on `Pallet` to deposit event. +/// +/// NOTE: For instantiable pallet, event must be generic over T and I. +/// +/// ### Macro expansion: +/// +/// Macro will add on enum `Event` the attributes: +/// * `#[derive(frame_support::CloneNoBound)]`, +/// * `#[derive(frame_support::EqNoBound)]`, +/// * `#[derive(frame_support::PartialEqNoBound)]`, +/// * `#[derive(codec::Encode)]`, +/// * `#[derive(codec::Decode)]`, +/// * `#[derive(frame_support::RuntimeDebugNoBound)]` +/// +/// Macro implements `From>` for (). +/// +/// Macro implements metadata function on `Event` returning the `EventMetadata`. +/// +/// If `#[pallet::generate_deposit]` then macro implement `fn deposit_event` on `Pallet`. +/// +/// # Storage: `#[pallet::storage]` optional +/// +/// Allow to define some abstract storage inside runtime storage and also set its metadata. +/// This attribute can be used multiple times. +/// +/// Item is defined as: +/// ```ignore +/// #[pallet::storage] +/// #[pallet::getter(fn $getter_name)] // optional +/// $vis type $StorageName<$some_generic> $optional_where_clause +/// = $StorageType<_, $some_generics, ...>; +/// ``` +/// I.e. it must be a type alias, with generics: `T` or `T: Config`, aliased type must be one +/// of `StorageValue`, `StorageMap` or `StorageDoubleMap` (defined in frame_support). +/// Their first generic must be `_` as it is written by the macro itself. +/// +/// The Prefix generic written by the macro is generated using `PalletInfo::name::>()` +/// and the name of the storage type. +/// E.g. if runtime names the pallet "MyExample" then the storage `type Foo = ...` use the +/// prefix: `Twox128(b"MyExample") ++ Twox128(b"Foo")`. +/// +/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allow to define a +/// getter function on `Pallet`. +/// +/// E.g: +/// ```ignore +/// #[pallet::storage] +/// #[pallet::getter(fn my_storage)] +/// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; +/// ``` +/// +/// NOTE: If the `QueryKind` generic parameter is still generic at this stage or is using some type +/// alias then the generation of the getter might fail. In this case the getter can be implemented +/// manually. +/// +/// NOTE: The generic `Hasher` must implement the [`StorageHasher`] trait (or the type is not +/// usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the +/// storage item. Thus generic hasher is supported. +/// +/// ### Macro expansion +/// +/// For each storage item the macro generates a struct named +/// `_GeneratedPrefixForStorage$NameOfStorage`, and implements [`StorageInstance`](traits::StorageInstance) +/// on it using the pallet and storage name. It then uses it as the first generic of the aliased +/// type. 
+/// +/// +/// The macro implements the function `storage_metadata` on `Pallet` implementing the metadata for +/// all storage items based on their kind: +/// * for a storage value, the type of the value is copied into the metadata +/// * for a storage map, the type of the values and the key's type is copied into the metadata +/// * for a storage double map, the type of the values, and the types of key1 and key2 are copied into +/// the metadata. +/// +/// # Type value: `#[pallet::type_value]` optional +/// +/// Helper to define a struct implementing `Get` trait. To ease use of storage types. +/// This attribute can be used multiple time. +/// +/// Item is defined as +/// ```ignore +/// #[pallet::type_value] +/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } +/// ``` +/// I.e.: a function definition with generics none or `T: Config` and a returned type. +/// +/// E.g.: +/// ```ignore +/// #[pallet::type_value] +/// fn MyDefault() -> T::Balance { 3.into() } +/// ``` +/// +/// NOTE: This attribute is meant to be used alongside `#[pallet::storage]` to defined some +/// specific default value in storage. +/// +/// ### Macro expansion +/// +/// Macro renames the function to some internal name, generate a struct with the original name of +/// the function and its generic, and implement `Get<$ReturnType>` by calling the user defined +/// function. +/// +/// # Genesis config: `#[pallet::genesis_config]` optional +/// +/// Allow to define the genesis configuration of the pallet. +/// +/// Item is defined as either an enum or a struct. +/// It needs to be public and implement trait GenesisBuild with `#[pallet::genesis_build]`. +/// The type generics is constrained to be either none, or `T` or `T: Config`. +/// +/// E.g: +/// ```ignore +/// #[pallet::genesis_config] +/// pub struct GenesisConfig { +/// _myfield: BalanceOf, +/// } +/// ``` +/// +/// ### Macro expansion +/// +/// Macro will add the following attribute on it: +/// * `#[cfg(feature = "std")]` +/// * `#[derive(Serialize, Deserialize)]` +/// * `#[serde(rename_all = "camelCase")]` +/// * `#[serde(deny_unknown_fields)]` +/// * `#[serde(bound(serialize = ""))]` +/// * `#[serde(bound(deserialize = ""))]` +/// +/// # Genesis build: `#[pallet::genesis_build]` optional +/// +/// Allow to define how genesis_configuration is built. +/// +/// Item is defined as +/// ```ignore +/// #[pallet::genesis_build] +/// impl GenesisBuild for GenesisConfig<$maybe_generics> { +/// fn build(&self) { $expr } +/// } +/// ``` +/// I.e. a rust trait implementation with generic `T: Config`, of trait `GenesisBuild` on type +/// `GenesisConfig` with generics none or `T`. +/// +/// E.g.: +/// ```ignore +/// #[pallet::genesis_build] +/// impl GenesisBuild for GenesisConfig { +/// fn build(&self) {} +/// } +/// ``` +/// +/// ### Macro expansion +/// +/// Macro will add the following attribute on it: +/// * `#[cfg(feature = "std")]` +/// +/// Macro will implement `sp_runtime::BuildModuleGenesisStorage` using `()` as second generic for +/// non-instantiable pallets. +/// +/// # Inherent: `#[pallet::inherent]` optional +/// +/// Allow the pallet to provide some inherent: +/// +/// Item is defined as: +/// ```ignore +/// #[pallet::inherent] +/// impl ProvideInherent for Pallet { +/// // ... regular trait implementation +/// } +/// ``` +/// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type +/// `Pallet`, and some optional where clause. 
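Editor's note: for the inherent section just described, a hedged sketch of what such an implementation can look like. The identifier value, the `u32` payload, the reuse of the illustrative `set_value` call from the earlier sketch, and the use of `sp_inherents::MakeFatalError` as a stand-in error type (assuming the pallet crate depends on `sp-inherents`) are all assumptions made for this example.

```rust
#[pallet::inherent]
impl<T: Config> ProvideInherent for Pallet<T> {
    type Call = Call<T>;
    // Stand-in error type; a real pallet usually defines its own error that
    // implements `IsFatalError`.
    type Error = sp_inherents::MakeFatalError<()>;
    const INHERENT_IDENTIFIER: InherentIdentifier = *b"example0";

    fn create_inherent(data: &InherentData) -> Option<Self::Call> {
        // Turn externally supplied inherent data (a `u32` here) into a call,
        // reusing the illustrative `set_value` dispatchable from above.
        data.get_data::<u32>(&Self::INHERENT_IDENTIFIER)
            .ok()
            .flatten()
            .map(Call::set_value)
    }
}
```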
+/// +/// ### Macro expansion +/// +/// Macro make currently no use of this information, but it might use this information in the +/// future to give information directly to construct_runtime. +/// +/// # Validate unsigned: `#[pallet::validate_unsigned]` optional +/// +/// Allow the pallet to validate some unsigned transaction: +/// +/// Item is defined as: +/// ```ignore +/// #[pallet::validate_unsigned] +/// impl ValidateUnsigned for Pallet { +/// // ... regular trait implementation +/// } +/// ``` +/// I.e. a trait implementation with bound `T: Config`, of trait `ValidateUnsigned` for type +/// `Pallet`, and some optional where clause. +/// +/// NOTE: There is also `sp_runtime::traits::SignedExtension` that can be used to add some specific +/// logic for transaction validation. +/// +/// ### Macro expansion +/// +/// Macro make currently no use of this information, but it might use this information in the +/// future to give information directly to construct_runtime. +/// +/// # Origin: `#[pallet::origin]` optional +/// +/// Allow to define some origin for the pallet. +/// +/// Item must be either a type alias or an enum or a struct. It needs to be public. +/// +/// E.g.: +/// ```ignore +/// #[pallet::origin] +/// pub struct Origin(PhantomData<(T)>); +/// ``` +/// +/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin can +/// be stored on-chain (e.g. in pallet-scheduler), thus any change must be done with care as it +/// might require some migration. +/// +/// NOTE: for instantiable pallet, origin must be generic over T and I. +/// +/// # General notes on instantiable pallet +/// +/// An instantiable pallet is one where Config is generic, i.e. `Config`. This allow runtime to +/// implement multiple instance of the pallet, by using different type for the generic. +/// This is the sole purpose of the generic `I`. +/// But because `PalletInfo` requires `Pallet` placeholder to be static it is important to bound +/// `'static` whenever `PalletInfo` can be used. +/// And in order to have instantiable pallet usable as a regular pallet without instance, it is +/// important to bound `= ()` on every types. +/// +/// Thus impl bound look like `impl, I: 'static>`, and types look like +/// `SomeType` or `SomeType, I: 'static = ()>`. +/// +/// # Example for pallet without instance. +/// +/// ``` +/// pub use pallet::*; // reexport in crate namespace for `construct_runtime!` +/// +/// #[frame_support::pallet] +/// // NOTE: The name of the pallet is provided by `construct_runtime` and is used as +/// // the unique identifier for the pallet's storage. It is not defined in the pallet itself. +/// pub mod pallet { +/// use frame_support::pallet_prelude::*; // Import various types used in the pallet definition +/// use frame_system::pallet_prelude::*; // Import some system helper types. +/// +/// type BalanceOf = ::Balance; +/// +/// // Define the generic parameter of the pallet +/// // The macro parses `#[pallet::constant]` attributes and uses them to generate metadata +/// // for the pallet's constants. +/// #[pallet::config] +/// pub trait Config: frame_system::Config { +/// #[pallet::constant] // put the constant in metadata +/// type MyGetParam: Get; +/// type Balance: Parameter + From; +/// type Event: From> + IsType<::Event>; +/// } +/// +/// // Define some additional constant to put into the constant metadata. 
+/// #[pallet::extra_constants] +/// impl Pallet { +/// /// Some description +/// fn exra_constant_name() -> u128 { 4u128 } +/// } +/// +/// // Define the pallet struct placeholder, various pallet function are implemented on it. +/// #[pallet::pallet] +/// #[pallet::generate_store(pub(super) trait Store)] +/// pub struct Pallet(_); +/// +/// // Implement the pallet hooks. +/// #[pallet::hooks] +/// impl Hooks> for Pallet { +/// fn on_initialize(_n: BlockNumberFor) -> Weight { +/// unimplemented!(); +/// } +/// +/// // can implement also: on_finalize, on_runtime_upgrade, offchain_worker, ... +/// // see `Hooks` trait +/// } +/// +/// // Declare Call struct and implement dispatchables. +/// // +/// // WARNING: Each parameter used in functions must implement: Clone, Debug, Eq, PartialEq, +/// // Codec. +/// // +/// // The macro parses `#[pallet::compact]` attributes on function arguments and implements +/// // the `Call` encoding/decoding accordingly. +/// #[pallet::call] +/// impl Pallet { +/// /// Doc comment put in metadata +/// #[pallet::weight(0)] // Defines weight for call (function parameters are in scope) +/// fn toto( +/// origin: OriginFor, +/// #[pallet::compact] _foo: u32, +/// ) -> DispatchResultWithPostInfo { +/// let _ = origin; +/// unimplemented!(); +/// } +/// } +/// +/// // Declare the pallet `Error` enum (this is optional). +/// // The macro generates error metadata using the doc comment on each variant. +/// #[pallet::error] +/// pub enum Error { +/// /// doc comment put into metadata +/// InsufficientProposersBalance, +/// } +/// +/// // Declare pallet Event enum (this is optional). +/// // +/// // WARNING: Each type used in variants must implement: Clone, Debug, Eq, PartialEq, Codec. +/// // +/// // The macro generates event metadata, and derive Clone, Debug, Eq, PartialEq and Codec +/// #[pallet::event] +/// // Additional argument to specify the metadata to use for given type. +/// #[pallet::metadata(BalanceOf = "Balance", u32 = "Other")] +/// // Generate a funciton on Pallet to deposit an event. +/// #[pallet::generate_deposit(pub(super) fn deposit_event)] +/// pub enum Event { +/// /// doc comment put in metadata +/// // `::AccountId` is not defined in metadata list, the last +/// // Thus the metadata is `::AccountId`. +/// Proposed(::AccountId), +/// /// doc +/// // here metadata will be `Balance` as define in metadata list +/// Spending(BalanceOf), +/// // here metadata will be `Other` as define in metadata list +/// Something(u32), +/// } +/// +/// // Define a struct which implements `frame_support::traits::Get` (optional). +/// #[pallet::type_value] +/// pub(super) fn MyDefault() -> T::Balance { 3.into() } +/// +/// // Declare a storage item. Any amount of storage items can be declared (optional). +/// // +/// // Is expected either `StorageValue`, `StorageMap` or `StorageDoubleMap`. +/// // The macro generates the prefix type and replaces the first generic `_`. +/// // +/// // The macro expands the metadata for the storage item with the type used: +/// // * for a storage value the type of the value is copied into the metadata +/// // * for a storage map the type of the values and the type of the key is copied into the metadata +/// // * for a storage double map the types of the values and keys are copied into the +/// // metadata. +/// // +/// // NOTE: The generic `Hasher` must implement the `StorageHasher` trait (or the type is not +/// // usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the +/// // storage item. 
Thus generic hasher is supported. +/// #[pallet::storage] +/// pub(super) type MyStorageValue = +/// StorageValue<_, T::Balance, ValueQuery, MyDefault>; +/// +/// // Another storage declaration +/// #[pallet::storage] +/// #[pallet::getter(fn my_storage)] +/// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; +/// +/// // Declare the genesis config (optional). +/// // +/// // The macro accepts either a struct or an enum; it checks that generics are consistent. +/// // +/// // Type must implement the `Default` trait. +/// #[pallet::genesis_config] +/// #[derive(Default)] +/// pub struct GenesisConfig { +/// _myfield: u32, +/// } +/// +/// // Declare genesis builder. (This is need only if GenesisConfig is declared) +/// #[pallet::genesis_build] +/// impl GenesisBuild for GenesisConfig { +/// fn build(&self) {} +/// } +/// +/// // Declare a pallet origin (this is optional). +/// // +/// // The macro accept type alias or struct or enum, it checks generics are consistent. +/// #[pallet::origin] +/// pub struct Origin(PhantomData); +/// +/// // Declare validate_unsigned implementation (this is optional). +/// #[pallet::validate_unsigned] +/// impl ValidateUnsigned for Pallet { +/// type Call = Call; +/// fn validate_unsigned( +/// source: TransactionSource, +/// call: &Self::Call +/// ) -> TransactionValidity { +/// Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) +/// } +/// } +/// +/// // Declare inherent provider for pallet (this is optional). +/// #[pallet::inherent] +/// impl ProvideInherent for Pallet { +/// type Call = Call; +/// type Error = InherentError; +/// +/// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; +/// +/// fn create_inherent(_data: &InherentData) -> Option { +/// unimplemented!(); +/// } +/// } +/// +/// // Regular rust code needed for implementing ProvideInherent trait +/// +/// #[derive(codec::Encode, sp_runtime::RuntimeDebug)] +/// #[cfg_attr(feature = "std", derive(codec::Decode))] +/// pub enum InherentError { +/// } +/// +/// impl sp_inherents::IsFatalError for InherentError { +/// fn is_fatal_error(&self) -> bool { +/// unimplemented!(); +/// } +/// } +/// +/// pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; +/// } +/// ``` +/// +/// # Example for pallet with instance. 
+/// +/// ``` +/// pub use pallet::*; +/// +/// #[frame_support::pallet] +/// pub mod pallet { +/// use frame_support::pallet_prelude::*; +/// use frame_system::pallet_prelude::*; +/// +/// type BalanceOf = >::Balance; +/// +/// #[pallet::config] +/// pub trait Config: frame_system::Config { +/// #[pallet::constant] +/// type MyGetParam: Get; +/// type Balance: Parameter + From; +/// type Event: From> + IsType<::Event>; +/// } +/// +/// #[pallet::extra_constants] +/// impl, I: 'static> Pallet { +/// /// Some description +/// fn exra_constant_name() -> u128 { 4u128 } +/// } +/// +/// #[pallet::pallet] +/// #[pallet::generate_store(pub(super) trait Store)] +/// pub struct Pallet(PhantomData<(T, I)>); +/// +/// #[pallet::hooks] +/// impl, I: 'static> Hooks> for Pallet { +/// } +/// +/// #[pallet::call] +/// impl, I: 'static> Pallet { +/// /// Doc comment put in metadata +/// #[pallet::weight(0)] +/// fn toto(origin: OriginFor, #[pallet::compact] _foo: u32) -> DispatchResultWithPostInfo { +/// let _ = origin; +/// unimplemented!(); +/// } +/// } +/// +/// #[pallet::error] +/// pub enum Error { +/// /// doc comment put into metadata +/// InsufficientProposersBalance, +/// } +/// +/// #[pallet::event] +/// #[pallet::metadata(BalanceOf = "Balance", u32 = "Other")] +/// #[pallet::generate_deposit(pub(super) fn deposit_event)] +/// pub enum Event, I: 'static = ()> { +/// /// doc comment put in metadata +/// Proposed(::AccountId), +/// /// doc +/// Spending(BalanceOf), +/// Something(u32), +/// } +/// +/// #[pallet::type_value] +/// pub(super) fn MyDefault, I: 'static>() -> T::Balance { 3.into() } +/// +/// #[pallet::storage] +/// pub(super) type MyStorageValue, I: 'static = ()> = +/// StorageValue<_, T::Balance, ValueQuery, MyDefault>; +/// +/// #[pallet::storage] +/// #[pallet::getter(fn my_storage)] +/// pub(super) type MyStorage = +/// StorageMap<_, Blake2_128Concat, u32, u32>; +/// +/// #[pallet::genesis_config] +/// #[derive(Default)] +/// pub struct GenesisConfig { +/// _myfield: u32, +/// } +/// +/// #[pallet::genesis_build] +/// impl, I: 'static> GenesisBuild for GenesisConfig { +/// fn build(&self) {} +/// } +/// +/// #[pallet::origin] +/// pub struct Origin(PhantomData<(T, I)>); +/// +/// #[pallet::validate_unsigned] +/// impl, I: 'static> ValidateUnsigned for Pallet { +/// type Call = Call; +/// fn validate_unsigned( +/// source: TransactionSource, +/// call: &Self::Call +/// ) -> TransactionValidity { +/// Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) +/// } +/// } +/// +/// #[pallet::inherent] +/// impl, I: 'static> ProvideInherent for Pallet { +/// type Call = Call; +/// type Error = InherentError; +/// +/// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; +/// +/// fn create_inherent(_data: &InherentData) -> Option { +/// unimplemented!(); +/// } +/// } +/// +/// // Regular rust code needed for implementing ProvideInherent trait +/// +/// #[derive(codec::Encode, sp_runtime::RuntimeDebug)] +/// #[cfg_attr(feature = "std", derive(codec::Decode))] +/// pub enum InherentError { +/// } +/// +/// impl sp_inherents::IsFatalError for InherentError { +/// fn is_fatal_error(&self) -> bool { +/// unimplemented!(); +/// } +/// } +/// +/// pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; +/// } +/// ``` +/// +/// ## Upgrade guidelines: +/// +/// 1. 
Export the metadata of the pallet for later checks:
+/// - run your node with the pallet active
+/// - query the metadata using the `state_getMetadata` RPC and curl, or use
+/// `subsee -p > meta.json`
+/// 2. Generate the template upgrade for the pallet provided by `decl_storage` with the
+/// environment variable `PRINT_PALLET_UPGRADE`:
+/// `PRINT_PALLET_UPGRADE=1 cargo check -p my_pallet`. This template can be used as a reference,
+/// as it contains all the information for storages, genesis config and genesis build.
+/// 3. Reorganize the pallet to have the trait `Config`, the `decl_*` macros, `ValidateUnsigned`,
+/// `ProvideInherent` and `Origin` all together in one file. Suggested order:
+/// * Config,
+/// * decl_module,
+/// * decl_event,
+/// * decl_error,
+/// * decl_storage,
+/// * origin,
+/// * validate_unsigned,
+/// * provide_inherent.
+/// At this point everything should still compile and be correct.
+/// 4. Start writing the new pallet module:
+/// ```ignore
+/// pub use pallet::*;
+///
+/// #[frame_support::pallet]
+/// pub mod pallet {
+///     use frame_support::pallet_prelude::*;
+///     use frame_system::pallet_prelude::*;
+///     use super::*;
+///
+///     #[pallet::pallet]
+///     #[pallet::generate_store($visibility_of_trait_store trait Store)]
+///     // NOTE: if the visibility of trait store is private but you want to make it available
+///     // in super, then use `pub(super)` or `pub(crate)` to make it available in crate.
+///     pub struct Pallet<T>(_);
+///     // pub struct Pallet<T, I = ()>(PhantomData<(T, I)>); // for instantiable pallet
+/// }
+/// ```
+/// 5. **migrate Config**: move the trait into the module with
+/// * all const in decl_module converted to `#[pallet::constant]`
+/// * the bound `IsType<<Self as frame_system::Config>::Event>` added to `type Event`
+/// 6. **migrate decl_module**: write:
+/// ```ignore
+/// #[pallet::hooks]
+/// impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+/// }
+/// ```
+/// and move `on_initialize`/`on_finalize`/`on_runtime_upgrade`/`offchain_worker`/`integrity_test`
+/// inside it.
+///
+/// Then write:
+/// ```ignore
+/// #[pallet::call]
+/// impl<T: Config> Pallet<T> {
+/// }
+/// ```
+/// and move all the calls from decl_module inside it, with a few changes to the signatures:
+/// - the origin must now be written completely, e.g. `origin: OriginFor<T>`
+/// - the result type must be `DispatchResultWithPostInfo`; you need to write it explicitly and
+/// you might also need to put `Ok(().into())` at the end of the function.
+/// - `#[compact]` must now be written `#[pallet::compact]`
+/// - `#[weight = ..]` must now be written `#[pallet::weight(..)]`
+///
+/// 7. **migrate event**:
+/// rewrite it as a simple enum with the attribute `#[pallet::event]`,
+/// use `#[pallet::generate_deposit($vis fn deposit_event)]` to generate `deposit_event`,
+/// use `#[pallet::metadata(...)]` to configure the metadata for types in order not to break them.
+/// 8. **migrate error**: rewrite it with the attribute `#[pallet::error]`.
+/// 9. **migrate storage**:
+/// decl_storage provides an upgrade template (see 2.). All storages, genesis config, genesis
+/// build and the default implementation of genesis config can be taken from it directly.
+///
+/// Otherwise here is the manual process:
+///
+/// First migrate the genesis logic.
Write:
+/// ```ignore
+/// #[pallet::genesis_config]
+/// struct GenesisConfig {
+///     // fields of add_extra_genesis
+/// }
+/// impl Default for GenesisConfig {
+///     // type default or default provided for fields
+/// }
+/// #[pallet::genesis_build]
+/// impl<T: Config> GenesisBuild<T> for GenesisConfig {
+/// // impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig { // for instantiable pallet
+///     fn build(&self) {
+///         // The add_extra_genesis build logic
+///     }
+/// }
+/// ```
+/// For each storage, if it contains `config(..)` then add a field, and make its default the value
+/// given in `= ..;` or the type's default if none; if it contains no `build` closure then also add
+/// the logic to build the value.
+/// For each storage, if it contains `build(..)` then add the logic to `genesis_build`.
+///
+/// NOTE: within `decl_storage`, the individual `config` and `build` closures are executed first,
+/// and the `add_extra_genesis` build is executed last.
+///
+/// Once this is done you can migrate the storage items individually. A few notes:
+/// - for private storage use `pub(crate) type` or `pub(super) type`, or nothing,
+/// - for storage with `get(fn ..)` use `#[pallet::getter(fn ...)]`
+/// - for storage whose value is `Option<$something>`, make the generic `Value` be `$something`
+/// and the generic `QueryKind` be `OptionQuery` (note: this is the default). Otherwise make
+/// `Value` the complete value type and `QueryKind` be `ValueQuery`.
+/// - for storage with a default value (`= $expr;`), provide a specific `OnEmpty` generic. To do
+/// so, use `#[pallet::type_value]` to generate the wanted struct.
+/// Example: `MyStorage: u32 = 3u32` would be written:
+/// ```ignore
+/// #[pallet::type_value] fn MyStorageOnEmpty() -> u32 { 3u32 }
+/// #[pallet::storage]
+/// pub(super) type MyStorage<T> = StorageValue<_, u32, ValueQuery, MyStorageOnEmpty>;
+/// ```
+///
+/// NOTE: `decl_storage` also generates the functions `assimilate_storage` and `build_storage`
+/// directly on `GenesisConfig`, and these are sometimes used in tests. In order not to break the
+/// tests, these functions can be implemented manually by forwarding to the `GenesisBuild`
+/// implementation.
+///
+/// 10. **migrate origin**: move the origin to the pallet module under `#[pallet::origin]`
+/// 11. **migrate validate_unsigned**: move the `ValidateUnsigned` implementation to the pallet
+/// module under `#[pallet::validate_unsigned]`
+/// 12. **migrate provide_inherent**: move the `ProvideInherent` implementation to the pallet
+/// module under `#[pallet::inherent]`
+/// 13. Rename the usage of `Module` to `Pallet` inside the crate.
+/// 14. The migration is done; now double-check it against the checking upgrade guidelines below.
+///
+/// ## Checking upgrade guidelines:
+///
+/// * compare metadata. Use [subsee](https://github.com/ascjones/subsee) to fetch the metadata
+/// and do a diff of the resulting json before and after migration.
This checks for: +/// * call, names, signature, docs +/// * event names, docs +/// * error names, docs +/// * storage names, hasher, prefixes, default value +/// * error , error, constant, +/// * manually check that: +/// * `Origin` is moved inside the macro under `#[pallet::origin]` if it exists +/// * `ValidateUnsigned` is moved inside the macro under `#[pallet::validate_unsigned)]` if it exists +/// * `ProvideInherent` is moved inside macro under `#[pallet::inherent)]` if it exists +/// * `on_initialize`/`on_finalize`/`on_runtime_upgrade`/`offchain_worker` are moved to `Hooks` +/// implementation +/// * storages with `config(..)` are converted to `GenesisConfig` field, and their default is +/// `= $expr;` if the storage have default value +/// * storages with `build($expr)` or `config(..)` are built in `GenesisBuild::build` +/// * `add_extra_genesis` fields are converted to `GenesisConfig` field with their correct +/// default if specified +/// * `add_extra_genesis` build is written into `GenesisBuild::build` +/// * storage items defined with [`pallet`] use the name of the pallet provided by [`PalletInfo::name`] +/// as `pallet_prefix` (in `decl_storage`, storage items used the `pallet_prefix` given as input of +/// `decl_storage` with the syntax `as Example`). +/// Thus a runtime using the pallet must be careful with this change. +/// To handle this change: +/// * either ensure that the name of the pallet given to `construct_runtime!` is the same +/// as the name the pallet was giving to `decl_storage`, +/// * or do a storage migration from the old prefix used to the new prefix used. +/// +/// NOTE: The prefixes used by storage items are in the metadata. Thus, ensuring the metadata hasn't +/// changed does ensure that the `pallet_prefix`s used by the storage items haven't changed. +/// +/// # Notes when macro fails to show proper error message spans: +/// +/// Rustc loses span for some macro input. Some tips to fix it: +/// * do not use inner attribute: +/// ```ignore +/// #[pallet] +/// pub mod pallet { +/// //! This inner attribute will make span fail +/// .. +/// } +/// ``` +/// * use the newest nightly possible. +/// +pub use frame_support_procedural::pallet; diff --git a/frame/support/src/metadata.rs b/frame/support/src/metadata.rs index 80737e4b13d6f..2edaba1cb47e9 100644 --- a/frame/support/src/metadata.rs +++ b/frame/support/src/metadata.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,26 +27,30 @@ pub use frame_metadata::{ /// Example: /// ``` ///# mod module0 { -///# pub trait Trait: 'static { +///# pub trait Config: 'static { ///# type Origin; ///# type BlockNumber; ///# type PalletInfo: frame_support::traits::PalletInfo; ///# type DbWeight: frame_support::traits::Get; ///# } ///# frame_support::decl_module! { -///# pub struct Module for enum Call where origin: T::Origin, system=self {} +///# pub struct Module for enum Call where origin: T::Origin, system=self {} ///# } ///# ///# frame_support::decl_storage! 
{ -///# trait Store for Module as TestStorage {} +///# trait Store for Module as TestStorage {} ///# } ///# } ///# use module0 as module1; ///# use module0 as module2; -///# impl module0::Trait for Runtime { +///# impl frame_support::traits::PalletInfo for Runtime { +///# fn index() -> Option { unimplemented!() } +///# fn name() -> Option<&'static str> { unimplemented!() } +///# } +///# impl module0::Config for Runtime { ///# type Origin = u32; ///# type BlockNumber = u32; -///# type PalletInfo = (); +///# type PalletInfo = Self; ///# type DbWeight = (); ///# } ///# @@ -297,7 +301,7 @@ mod tests { mod system { use super::*; - pub trait Trait: 'static { + pub trait Config: 'static { type BaseCallFilter; const ASSOCIATED_CONST: u64 = 500; type Origin: Into, Self::Origin>> @@ -311,7 +315,7 @@ mod tests { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { /// Hi, I am a comment. const BlockNumber: T::BlockNumber = 100.into(); const GetType: T::AccountId = T::SomeValue::get().into(); @@ -341,19 +345,19 @@ mod tests { } } - pub type Origin = RawOrigin<::AccountId>; + pub type Origin = RawOrigin<::AccountId>; } mod event_module { use crate::dispatch::DispatchResult; use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_event!( - pub enum Event where ::Balance + pub enum Event where ::Balance { /// Hi, I am a comment. TestEvent(Balance), @@ -361,7 +365,7 @@ mod tests { ); decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system { + pub struct Module for enum Call where origin: T::Origin, system=system { type Error = Error; #[weight = 0] @@ -370,7 +374,7 @@ mod tests { } crate::decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Some user input error UserInputError, /// Something bad happened @@ -383,23 +387,23 @@ mod tests { mod event_module2 { use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_event!( - pub enum Event where ::Balance + pub enum Event where ::Balance { TestEvent(Balance), } ); decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } crate::decl_storage! { - trait Store for Module as TestStorage { + trait Store for Module as TestStorage { StorageMethod : Option; } add_extra_genesis { @@ -414,6 +418,37 @@ mod tests { #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] pub struct TestRuntime; + impl crate::traits::PalletInfo for TestRuntime { + fn index() -> Option { + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::>() { + return Some(0) + } + if type_id == sp_std::any::TypeId::of::() { + return Some(1) + } + if type_id == sp_std::any::TypeId::of::() { + return Some(2) + } + + None + } + fn name() -> Option<&'static str> { + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::>() { + return Some("System") + } + if type_id == sp_std::any::TypeId::of::() { + return Some("EventModule") + } + if type_id == sp_std::any::TypeId::of::() { + return Some("EventModule2") + } + + None + } + } + impl_outer_event! { pub enum TestEvent for TestRuntime { system, @@ -433,11 +468,11 @@ mod tests { } } - impl event_module::Trait for TestRuntime { + impl event_module::Config for TestRuntime { type Balance = u32; } - impl event_module2::Trait for TestRuntime { + impl event_module2::Config for TestRuntime { type Balance = u32; } @@ -445,13 +480,13 @@ mod tests { pub const SystemValue: u32 = 600; } - impl system::Trait for TestRuntime { + impl system::Config for TestRuntime { type BaseCallFilter = (); type Origin = Origin; type AccountId = u32; type BlockNumber = u32; type SomeValue = SystemValue; - type PalletInfo = (); + type PalletInfo = Self; type DbWeight = (); type Call = Call; } @@ -480,7 +515,7 @@ mod tests { struct ConstantAssociatedConstByteGetter; impl DefaultByte for ConstantAssociatedConstByteGetter { fn default_byte(&self) -> Vec { - ::ASSOCIATED_CONST.encode() + ::ASSOCIATED_CONST.encode() } } diff --git a/frame/support/src/origin.rs b/frame/support/src/origin.rs index b96a56c8e1d8f..19b24fb84bb1a 100644 --- a/frame/support/src/origin.rs +++ b/frame/support/src/origin.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -181,12 +181,12 @@ macro_rules! impl_outer_origin { index { $( $index:tt )? }, )* ) => { - // WARNING: All instance must hold the filter `frame_system::Trait::BaseCallFilter`, except + // WARNING: All instance must hold the filter `frame_system::Config::BaseCallFilter`, except // when caller is system Root. One can use `OriginTrait::reset_filter` to do so. #[derive(Clone)] pub struct $name { caller: $caller_name, - filter: $crate::sp_std::rc::Rc::Call) -> bool>>, + filter: $crate::sp_std::rc::Rc::Call) -> bool>>, } #[cfg(not(feature = "std"))] @@ -213,9 +213,9 @@ macro_rules! impl_outer_origin { } impl $crate::traits::OriginTrait for $name { - type Call = <$runtime as $system::Trait>::Call; + type Call = <$runtime as $system::Config>::Call; type PalletsOrigin = $caller_name; - type AccountId = <$runtime as $system::Trait>::AccountId; + type AccountId = <$runtime as $system::Config>::AccountId; fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static) { let f = self.filter.clone(); @@ -227,8 +227,8 @@ macro_rules! impl_outer_origin { fn reset_filter(&mut self) { let filter = < - <$runtime as $system::Trait>::BaseCallFilter - as $crate::traits::Filter<<$runtime as $system::Trait>::Call> + <$runtime as $system::Config>::BaseCallFilter + as $crate::traits::Filter<<$runtime as $system::Config>::Call> >::filter; self.filter = $crate::sp_std::rc::Rc::new(Box::new(filter)); @@ -246,7 +246,7 @@ macro_rules! impl_outer_origin { &self.caller } - /// Create with system none origin and `frame-system::Trait::BaseCallFilter`. + /// Create with system none origin and `frame-system::Config::BaseCallFilter`. fn none() -> Self { $system::RawOrigin::None.into() } @@ -254,8 +254,8 @@ macro_rules! impl_outer_origin { fn root() -> Self { $system::RawOrigin::Root.into() } - /// Create with system signed origin and `frame-system::Trait::BaseCallFilter`. 
- fn signed(by: <$runtime as $system::Trait>::AccountId) -> Self { + /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. + fn signed(by: <$runtime as $system::Config>::AccountId) -> Self { $system::RawOrigin::Signed(by).into() } } @@ -280,7 +280,7 @@ macro_rules! impl_outer_origin { // For backwards compatibility and ease of accessing these functions. #[allow(dead_code)] impl $name { - /// Create with system none origin and `frame-system::Trait::BaseCallFilter`. + /// Create with system none origin and `frame-system::Config::BaseCallFilter`. pub fn none() -> Self { <$name as $crate::traits::OriginTrait>::none() } @@ -288,8 +288,8 @@ macro_rules! impl_outer_origin { pub fn root() -> Self { <$name as $crate::traits::OriginTrait>::root() } - /// Create with system signed origin and `frame-system::Trait::BaseCallFilter`. - pub fn signed(by: <$runtime as $system::Trait>::AccountId) -> Self { + /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. + pub fn signed(by: <$runtime as $system::Config>::AccountId) -> Self { <$name as $crate::traits::OriginTrait>::signed(by) } } @@ -302,7 +302,7 @@ macro_rules! impl_outer_origin { impl From<$system::Origin<$runtime>> for $name { /// Convert to runtime origin: /// * root origin is built with no filter - /// * others use `frame-system::Trait::BaseCallFilter` + /// * others use `frame-system::Config::BaseCallFilter` fn from(x: $system::Origin<$runtime>) -> Self { let o: $caller_name = x.into(); o.into() @@ -335,10 +335,10 @@ macro_rules! impl_outer_origin { } } } - impl From::AccountId>> for $name { + impl From::AccountId>> for $name { /// Convert to runtime origin with caller being system signed or none and use filter - /// `frame-system::Trait::BaseCallFilter`. - fn from(x: Option<<$runtime as $system::Trait>::AccountId>) -> Self { + /// `frame-system::Config::BaseCallFilter`. + fn from(x: Option<<$runtime as $system::Config>::AccountId>) -> Self { <$system::Origin<$runtime>>::from(x).into() } } @@ -352,7 +352,7 @@ macro_rules! impl_outer_origin { } impl From<$module::Origin < $( $generic )? $(, $module::$generic_instance )? > > for $name { - /// Convert to runtime origin using `frame-system::Trait::BaseCallFilter`. + /// Convert to runtime origin using `frame-system::Config::BaseCallFilter`. fn from(x: $module::Origin < $( $generic )? $(, $module::$generic_instance )? 
>) -> Self { let x: $caller_name = x.into(); x.into() @@ -388,7 +388,7 @@ mod tests { mod frame_system { use super::*; - pub trait Trait { + pub trait Config { type AccountId; type Call; type BaseCallFilter; @@ -410,7 +410,7 @@ mod tests { } } - pub type Origin = RawOrigin<::AccountId>; + pub type Origin = RawOrigin<::AccountId>; } mod origin_without_generic { @@ -439,7 +439,7 @@ mod tests { } } - impl frame_system::Trait for TestRuntime { + impl frame_system::Config for TestRuntime { type AccountId = u32; type Call = u32; type BaseCallFilter = BaseCallFilter; @@ -478,9 +478,9 @@ mod tests { ); impl_outer_origin!( - pub enum OriginIndices for TestRuntime where system = frame_system, system_index = "11" { + pub enum OriginIndices for TestRuntime where system = frame_system, system_index = 11 { origin_with_generic, - #[codec(index = "10")] origin_without_generic, + #[codec(index = 10)] origin_without_generic, } ); diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index 431b5e0930384..c1885fc074307 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,6 +25,14 @@ use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; pub use sp_core::storage::{ChildInfo, ChildType}; +/// The outcome of calling [`kill_storage`]. +pub enum KillOutcome { + /// No key remains in the child trie. + AllRemoved, + /// At least one key still resides in the child trie due to the supplied limit. + SomeRemaining, +} + /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( child_info: &ChildInfo, @@ -148,13 +156,37 @@ pub fn exists( } /// Remove all `storage_key` key/values +/// +/// Deletes all keys from the overlay and up to `limit` keys from the backend if +/// it is set to `Some`. No limit is applied when `limit` is set to `None`. +/// +/// The limit can be used to partially delete a child trie in case it is too large +/// to delete in one go (block). +/// +/// # Note +/// +/// Please note that keys that are residing in the overlay for that child trie when +/// issuing this call are all deleted without counting towards the `limit`. Only keys +/// written during the current block are part of the overlay. Deleting with a `limit` +/// mostly makes sense with an empty overlay for that child trie. +/// +/// Calling this function multiple times per block for the same `storage_key` does +/// not make much sense because it is not cumulative when called inside the same block. +/// Use this function to distribute the deletion of a single child trie across multiple +/// blocks. pub fn kill_storage( child_info: &ChildInfo, -) { - match child_info.child_type() { + limit: Option, +) -> KillOutcome { + let all_removed = match child_info.child_type() { ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill( child_info.storage_key(), + limit ), + }; + match all_removed { + true => KillOutcome::AllRemoved, + false => KillOutcome::SomeRemaining, } } @@ -211,3 +243,21 @@ pub fn root( ), } } + +/// Return the length in bytes of the value without reading it. `None` if it does not exist. 
+pub fn len( + child_info: &ChildInfo, + key: &[u8], +) -> Option { + match child_info.child_type() { + ChildType::ParentKeyId => { + let mut buffer = [0; 0]; + sp_io::default_child_storage::read( + child_info.storage_key(), + key, + &mut buffer, + 0, + ) + } + } +} diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index cbc62c83de886..7e1a2456e4536 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -153,6 +153,13 @@ impl storage::StorageDoubleMap for G where G::from_optional_value_to_query(unhashed::get(&Self::storage_double_map_final_key(k1, k2))) } + fn try_get(k1: KArg1, k2: KArg2) -> Result + where + KArg1: EncodeLike, + KArg2: EncodeLike { + unhashed::get(&Self::storage_double_map_final_key(k1, k2)).ok_or(()) + } + fn take(k1: KArg1, k2: KArg2) -> Self::Query where KArg1: EncodeLike, KArg2: EncodeLike, @@ -376,7 +383,7 @@ impl< iterator } - fn translate Option>(f: F) { + fn translate Option>(mut f: F) { let prefix = G::prefix_hash(); let mut previous_key = prefix.clone(); while let Some(next) = sp_io::storage::next_key(&previous_key) @@ -425,7 +432,7 @@ mod test_iterators { storage::{generator::StorageDoubleMap, IterableStorageDoubleMap, unhashed}, }; - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; @@ -433,14 +440,14 @@ mod test_iterators { } crate::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] struct NoDef(u32); crate::decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { DoubleMap: double_map hasher(blake2_128_concat) u16, hasher(twox_64_concat) u32 => u64; } } diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index 601fd4c4a8dd2..7f6eb2a518f57 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -162,7 +162,7 @@ impl< iterator } - fn translate Option>(f: F) { + fn translate Option>(mut f: F) { let prefix = G::prefix_hash(); let mut previous_key = prefix.clone(); while let Some(next) = sp_io::storage::next_key(&previous_key) @@ -226,6 +226,10 @@ impl> storage::StorageMap G::from_optional_value_to_query(unhashed::get(Self::storage_map_final_key(key).as_ref())) } + fn try_get>(key: KeyArg) -> Result { + unhashed::get(Self::storage_map_final_key(key).as_ref()).ok_or(()) + } + fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg) { unhashed::put(Self::storage_map_final_key(key).as_ref(), &val) } @@ -325,7 +329,7 @@ mod test_iterators { storage::{generator::StorageMap, IterableStorageMap, unhashed}, }; - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; @@ -333,14 +337,14 @@ mod test_iterators { } crate::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] struct NoDef(u32); crate::decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { Map: map hasher(blake2_128_concat) u16 => u64; } } diff --git a/frame/support/src/storage/generator/mod.rs b/frame/support/src/storage/generator/mod.rs index 9346718f63481..fc2a21ff72517 100644 --- a/frame/support/src/storage/generator/mod.rs +++ b/frame/support/src/storage/generator/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -42,26 +42,26 @@ mod tests { struct Runtime; - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } - impl Trait for Runtime { + impl Config for Runtime { type Origin = u32; type BlockNumber = u32; - type PalletInfo = (); + type PalletInfo = crate::tests::PanicPalletInfo; type DbWeight = (); } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } crate::decl_storage! { - trait Store for Module as Runtime { + trait Store for Module as Runtime { Value get(fn value) config(): (u64, u64); NumberMap: map hasher(identity) u32 => u64; DoubleMap: double_map hasher(identity) u32, hasher(identity) u32 => u64; diff --git a/frame/support/src/storage/generator/value.rs b/frame/support/src/storage/generator/value.rs index 2da3d91718438..093dcb305e64a 100644 --- a/frame/support/src/storage/generator/value.rs +++ b/frame/support/src/storage/generator/value.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/src/storage/hashed.rs b/frame/support/src/storage/hashed.rs index 96a487111a2af..a0c9ab6708e7f 100644 --- a/frame/support/src/storage/hashed.rs +++ b/frame/support/src/storage/hashed.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index 75f90ba7b06cc..69b9920194f41 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 97c1eabe6d39d..93cf7c6639064 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,6 +29,7 @@ pub mod child; #[doc(hidden)] pub mod generator; pub mod migration; +pub mod types; #[cfg(all(feature = "std", any(test, debug_assertions)))] mod debug_helper { @@ -106,8 +107,7 @@ pub fn with_transaction(f: impl FnOnce() -> TransactionOutcome) -> R { /// A trait for working with macro-generated storage values under the substrate storage API. /// -/// Details on implementation can be found at -/// [`generator::StorageValue`] +/// Details on implementation can be found at [`generator::StorageValue`]. pub trait StorageValue { /// The type that get/take return. type Query; @@ -121,8 +121,9 @@ pub trait StorageValue { /// Load the value from the provided storage instance. fn get() -> Self::Query; - /// Try to get the underlying value from the provided storage instance; `Ok` if it exists, - /// `Err` if not. + /// Try to get the underlying value from the provided storage instance. + /// + /// Returns `Ok` if it exists, `Err` if not. fn try_get() -> Result; /// Translate a value from some previous type (`O`) to the current type. @@ -199,8 +200,7 @@ pub trait StorageValue { /// A strongly-typed map in storage. /// -/// Details on implementation can be found at -/// [`generator::StorageMap`] +/// Details on implementation can be found at [`generator::StorageMap`]. pub trait StorageMap { /// The type that get/take return. type Query; @@ -214,6 +214,11 @@ pub trait StorageMap { /// Load the value associated with the given key from the map. fn get>(key: KeyArg) -> Self::Query; + /// Try to get the value for the given key from the map. + /// + /// Returns `Ok` if it exists, `Err` if not. + fn try_get>(key: KeyArg) -> Result; + /// Swap the values of two keys. fn swap, KeyArg2: EncodeLike>(key1: KeyArg1, key2: KeyArg2); @@ -232,7 +237,9 @@ pub trait StorageMap { f: F, ) -> Result; - /// Mutate the value under a key. Deletes the item if mutated to a `None`. + /// Mutate the value under a key. + /// + /// Deletes the item if mutated to a `None`. fn mutate_exists, R, F: FnOnce(&mut Option) -> R>(key: KeyArg, f: F) -> R; /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. @@ -308,7 +315,7 @@ pub trait IterableStorageMap: StorageMap { /// By returning `None` from `f` for an element, you'll remove it from the map. 
/// /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. - fn translate Option>(f: F); + fn translate Option>(f: F); } /// A strongly-typed double map in storage whose secondary keys and values can be iterated over. @@ -345,7 +352,7 @@ pub trait IterableStorageDoubleMap< /// By returning `None` from `f` for an element, you'll remove it from the map. /// /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. - fn translate Option>(f: F); + fn translate Option>(f: F); } /// An implementation of a map with a two keys. @@ -353,8 +360,7 @@ pub trait IterableStorageDoubleMap< /// It provides an important ability to efficiently remove all entries /// that have a common first key. /// -/// Details on implementation can be found at -/// [`generator::StorageDoubleMap`] +/// Details on implementation can be found at [`generator::StorageDoubleMap`]. pub trait StorageDoubleMap { /// The type that get/take returns. type Query; @@ -377,6 +383,14 @@ pub trait StorageDoubleMap { KArg1: EncodeLike, KArg2: EncodeLike; + /// Try to get the value for the given key from the double map. + /// + /// Returns `Ok` if it exists, `Err` if not. + fn try_get(k1: KArg1, k2: KArg2) -> Result + where + KArg1: EncodeLike, + KArg2: EncodeLike; + /// Take a value from storage, removing it afterwards. fn take(k1: KArg1, k2: KArg2) -> Self::Query where @@ -600,7 +614,7 @@ pub trait StoragePrefixedMap { /// # Usage /// /// This would typically be called inside the module implementation of on_runtime_upgrade. - fn translate_values Option>(f: F) { + fn translate_values Option>(mut f: F) { let prefix = Self::final_prefix(); let mut previous_key = prefix.clone().to_vec(); while let Some(next) = sp_io::storage::next_key(&previous_key) diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs new file mode 100644 index 0000000000000..f0b5f66eff058 --- /dev/null +++ b/frame/support/src/storage/types/double_map.rs @@ -0,0 +1,613 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage map type. Implements StorageDoubleMap, StorageIterableDoubleMap, +//! StoragePrefixedDoubleMap traits and their methods directly. + +use codec::{FullCodec, Decode, EncodeLike, Encode}; +use crate::{ + storage::{ + StorageAppend, StorageDecodeLength, + types::{OptionQuery, QueryKindTrait, OnEmptyGetter}, + }, + traits::{GetDefault, StorageInstance}, +}; +use frame_metadata::{DefaultByteGetter, StorageEntryModifier}; +use sp_std::vec::Vec; + +/// A type that allow to store values for `(key1, key2)` couple. Similar to `StorageMap` but allow +/// to iterate and remove value associated to first key. 
+/// +/// Each value is stored at: +/// ```nocompile +/// Twox128(Prefix::pallet_prefix()) +/// ++ Twox128(Prefix::STORAGE_PREFIX) +/// ++ Hasher1(encode(key1)) +/// ++ Hasher2(encode(key2)) +/// ``` +/// +/// # Warning +/// +/// If the key1s (or key2s) are not trusted (e.g. can be set by a user), a cryptographic `hasher` +/// such as `blake2_128_concat` must be used for Hasher1 (resp. Hasher2). Otherwise, other values +/// in storage can be compromised. +pub struct StorageDoubleMap< + Prefix, Hasher1, Key1, Hasher2, Key2, Value, QueryKind=OptionQuery, OnEmpty=GetDefault +>( + core::marker::PhantomData<(Prefix, Hasher1, Key1, Hasher2, Key2, Value, QueryKind, OnEmpty)> +); + +impl + crate::storage::generator::StorageDoubleMap for + StorageDoubleMap +where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher, + Hasher2: crate::hash::StorageHasher, + Key1: FullCodec, + Key2: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static +{ + type Query = QueryKind::Query; + type Hasher1 = Hasher1; + type Hasher2 = Hasher2; + fn module_prefix() -> &'static [u8] { + Prefix::pallet_prefix().as_bytes() + } + fn storage_prefix() -> &'static [u8] { + Prefix::STORAGE_PREFIX.as_bytes() + } + fn from_optional_value_to_query(v: Option) -> Self::Query { + QueryKind::from_optional_value_to_query(v) + } + fn from_query_to_optional_value(v: Self::Query) -> Option { + QueryKind::from_query_to_optional_value(v) + } +} + +impl + crate::storage::StoragePrefixedMap for + StorageDoubleMap +where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher, + Hasher2: crate::hash::StorageHasher, + Key1: FullCodec, + Key2: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static +{ + fn module_prefix() -> &'static [u8] { + >::module_prefix() + } + fn storage_prefix() -> &'static [u8] { + >::storage_prefix() + } +} + +impl + StorageDoubleMap +where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher, + Hasher2: crate::hash::StorageHasher, + Key1: FullCodec, + Key2: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static +{ + /// Get the storage key used to fetch a value corresponding to a specific key. + pub fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + >::hashed_key_for(k1, k2) + } + + /// Does the value (explicitly) exist in storage? + pub fn contains_key(k1: KArg1, k2: KArg2) -> bool + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + >::contains_key(k1, k2) + } + + /// Load the value associated with the given key from the double map. + pub fn get(k1: KArg1, k2: KArg2) -> QueryKind::Query + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + >::get(k1, k2) + } + + /// Try to get the value for the given key from the double map. + /// + /// Returns `Ok` if it exists, `Err` if not. + pub fn try_get(k1: KArg1, k2: KArg2) -> Result + where + KArg1: EncodeLike, + KArg2: EncodeLike { + >::try_get(k1, k2) + } + + /// Take a value from storage, removing it afterwards. + pub fn take(k1: KArg1, k2: KArg2) -> QueryKind::Query + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + >::take(k1, k2) + } + + /// Swap the values of two key-pairs. 
+ pub fn swap(x_k1: XKArg1, x_k2: XKArg2, y_k1: YKArg1, y_k2: YKArg2) + where + XKArg1: EncodeLike, + XKArg2: EncodeLike, + YKArg1: EncodeLike, + YKArg2: EncodeLike, + { + >::swap(x_k1, x_k2, y_k1, y_k2) + } + + /// Store a value to be associated with the given keys from the double map. + pub fn insert(k1: KArg1, k2: KArg2, val: VArg) + where + KArg1: EncodeLike, + KArg2: EncodeLike, + VArg: EncodeLike, + { + >::insert(k1, k2, val) + } + + /// Remove the value under the given keys. + pub fn remove(k1: KArg1, k2: KArg2) + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + >::remove(k1, k2) + } + + /// Remove all values under the first key. + pub fn remove_prefix(k1: KArg1) where KArg1: ?Sized + EncodeLike { + >::remove_prefix(k1) + } + + /// Iterate over values that share the first key. + pub fn iter_prefix_values(k1: KArg1) -> crate::storage::PrefixIterator + where KArg1: ?Sized + EncodeLike + { + >::iter_prefix_values(k1) + } + + /// Mutate the value under the given keys. + pub fn mutate(k1: KArg1, k2: KArg2, f: F) -> R + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut QueryKind::Query) -> R, + { + >::mutate(k1, k2, f) + } + + /// Mutate the value under the given keys when the closure returns `Ok`. + pub fn try_mutate(k1: KArg1, k2: KArg2, f: F) -> Result + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut QueryKind::Query) -> Result, + { + >::try_mutate(k1, k2, f) + } + + /// Mutate the value under the given keys. Deletes the item if mutated to a `None`. + pub fn mutate_exists(k1: KArg1, k2: KArg2, f: F) -> R + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut Option) -> R, + { + >::mutate_exists(k1, k2, f) + } + + /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. + pub fn try_mutate_exists(k1: KArg1, k2: KArg2, f: F) -> Result + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut Option) -> Result, + { + >::try_mutate_exists(k1, k2, f) + } + + /// Append the given item to the value in the storage. + /// + /// `Value` is required to implement [`StorageAppend`]. + /// + /// # Warning + /// + /// If the storage item is not encoded properly, the storage will be overwritten + /// and set to `[item]`. Any default value set for the storage item will be ignored + /// on overwrite. + pub fn append( + k1: KArg1, + k2: KArg2, + item: EncodeLikeItem, + ) where + KArg1: EncodeLike, + KArg2: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageAppend, + { + >::append(k1, k2, item) + } + + /// Read the length of the storage value without decoding the entire value under the + /// given `key1` and `key2`. + /// + /// `Value` is required to implement [`StorageDecodeLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + pub fn decode_len(key1: KArg1, key2: KArg2) -> Option + where + KArg1: EncodeLike, + KArg2: EncodeLike, + Value: StorageDecodeLength, + { + >::decode_len(key1, key2) + } + + /// Migrate an item with the given `key1` and `key2` from defunct `OldHasher1` and + /// `OldHasher2` to the current hashers. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. 
+ pub fn migrate_keys< + OldHasher1: crate::StorageHasher, + OldHasher2: crate::StorageHasher, + KeyArg1: EncodeLike, + KeyArg2: EncodeLike, + >(key1: KeyArg1, key2: KeyArg2) -> Option { + < + Self as crate::storage::StorageDoubleMap + >::migrate_keys::(key1, key2) + } + + /// Remove all value of the storage. + pub fn remove_all() { + >::remove_all() + } + + /// Iter over all value of the storage. + /// + /// NOTE: If a value failed to decode becaues storage is corrupted then it is skipped. + pub fn iter_values() -> crate::storage::PrefixIterator { + >::iter_values() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. + /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade. + pub fn translate_values Option>(f: F) { + >::translate_values(f) + } +} + +impl + StorageDoubleMap +where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher + crate::ReversibleStorageHasher, + Hasher2: crate::hash::StorageHasher + crate::ReversibleStorageHasher, + Key1: FullCodec, + Key2: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static +{ + /// Enumerate all elements in the map with first key `k1` in no particular order. + /// + /// If you add or remove values whose first key is `k1` to the map while doing this, you'll get + /// undefined results. + pub fn iter_prefix(k1: impl EncodeLike) -> crate::storage::PrefixIterator<(Key2, Value)> { + >::iter_prefix(k1) + } + + /// Remove all elements from the map with first key `k1` and iterate through them in no + /// particular order. + /// + /// If you add elements with first key `k1` to the map while doing this, you'll get undefined + /// results. + pub fn drain_prefix(k1: impl EncodeLike) -> crate::storage::PrefixIterator<(Key2, Value)> { + >::drain_prefix(k1) + } + + /// Enumerate all elements in the map in no particular order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter() -> crate::storage::PrefixIterator<(Key1, Key2, Value)> { + >::iter() + } + + /// Remove all elements from the map and iterate through them in no particular order. + /// + /// If you add elements to the map while doing this, you'll get undefined results. + pub fn drain() -> crate::storage::PrefixIterator<(Key1, Key2, Value)> { + >::drain() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + pub fn translate Option>(f: F) { + >::translate(f) + } +} + +/// Part of storage metadata for a storage double map. +/// +/// NOTE: Generic hashers is supported. 
+pub trait StorageDoubleMapMetadata { + const MODIFIER: StorageEntryModifier; + const NAME: &'static str; + const DEFAULT: DefaultByteGetter; + const HASHER1: frame_metadata::StorageHasher; + const HASHER2: frame_metadata::StorageHasher; +} + +impl StorageDoubleMapMetadata + for StorageDoubleMap where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher, + Hasher2: crate::hash::StorageHasher, + Key1: FullCodec, + Key2: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static +{ + const MODIFIER: StorageEntryModifier = QueryKind::METADATA; + const HASHER1: frame_metadata::StorageHasher = Hasher1::METADATA; + const HASHER2: frame_metadata::StorageHasher = Hasher2::METADATA; + const NAME: &'static str = Prefix::STORAGE_PREFIX; + const DEFAULT: DefaultByteGetter = + DefaultByteGetter(&OnEmptyGetter::(core::marker::PhantomData)); +} + +#[cfg(test)] +mod test { + use super::*; + use sp_io::{TestExternalities, hashing::twox_128}; + use crate::hash::*; + use crate::storage::types::ValueQuery; + use frame_metadata::StorageEntryModifier; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { "test" } + const STORAGE_PREFIX: &'static str = "foo"; + } + + struct ADefault; + impl crate::traits::Get for ADefault { + fn get() -> u32 { + 97 + } + } + + #[test] + fn test() { + type A = StorageDoubleMap< + Prefix, Blake2_128Concat, u16, Twox64Concat, u8, u32, OptionQuery + >; + type AValueQueryWithAnOnEmpty = StorageDoubleMap< + Prefix, Blake2_128Concat, u16, Twox64Concat, u8, u32, ValueQuery, ADefault + >; + type B = StorageDoubleMap; + type C = StorageDoubleMap; + type WithLen = StorageDoubleMap>; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"foo")); + k.extend(&3u16.blake2_128_concat()); + k.extend(&30u8.twox_64_concat()); + assert_eq!(A::hashed_key_for(3, 30).to_vec(), k); + + assert_eq!(A::contains_key(3, 30), false); + assert_eq!(A::get(3, 30), None); + assert_eq!(AValueQueryWithAnOnEmpty::get(3, 30), 97); + + A::insert(3, 30, 10); + assert_eq!(A::contains_key(3, 30), true); + assert_eq!(A::get(3, 30), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get(3, 30), 10); + + A::swap(3, 30, 2, 20); + assert_eq!(A::contains_key(3, 30), false); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(3, 30), None); + assert_eq!(AValueQueryWithAnOnEmpty::get(3, 30), 97); + assert_eq!(A::get(2, 20), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get(2, 20), 10); + + A::remove(2, 20); + assert_eq!(A::contains_key(2, 20), false); + assert_eq!(A::get(2, 20), None); + + AValueQueryWithAnOnEmpty::mutate(2, 20, |v| *v = *v * 2); + AValueQueryWithAnOnEmpty::mutate(2, 20, |v| *v = *v * 2); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(97 * 4)); + + A::remove(2, 20); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, 20, |v| { + *v = *v * 2; Ok(()) + }); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, 20, |v| { + *v = *v * 2; Ok(()) + }); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(97 * 4)); + + A::remove(2, 20); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, 20, |v| { + *v = *v * 2; Err(()) + }); + assert_eq!(A::contains_key(2, 20), false); + + A::remove(2, 20); + AValueQueryWithAnOnEmpty::mutate_exists(2, 20, |v| { + assert!(v.is_none()); + *v = Some(10); + }); + assert_eq!(A::contains_key(2, 20), true); + 
assert_eq!(A::get(2, 20), Some(10)); + AValueQueryWithAnOnEmpty::mutate_exists(2, 20, |v| { + *v = Some(v.unwrap() * 10); + }); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(100)); + + A::remove(2, 20); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, 20, |v| { + assert!(v.is_none()); + *v = Some(10); + Ok(()) + }); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(10)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, 20, |v| { + *v = Some(v.unwrap() * 10); + Ok(()) + }); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(100)); + assert_eq!(A::try_get(2, 20), Ok(100)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, 20, |v| { + *v = Some(v.unwrap() * 10); + Err(()) + }); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(100)); + + + A::insert(2, 20, 10); + assert_eq!(A::take(2, 20), Some(10)); + assert_eq!(A::contains_key(2, 20), false); + assert_eq!(AValueQueryWithAnOnEmpty::take(2, 20), 97); + assert_eq!(A::contains_key(2, 20), false); + assert_eq!(A::try_get(2, 20), Err(())); + + B::insert(2, 20, 10); + assert_eq!(A::migrate_keys::(2, 20), Some(10)); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(10)); + + A::insert(3, 30, 10); + A::insert(4, 40, 10); + A::remove_all(); + assert_eq!(A::contains_key(3, 30), false); + assert_eq!(A::contains_key(4, 40), false); + + A::insert(3, 30, 10); + A::insert(4, 40, 10); + assert_eq!(A::iter_values().collect::>(), vec![10, 10]); + + C::insert(3, 30, 10); + C::insert(4, 40, 10); + A::translate_values::(|v| Some((v * 2).into())); + assert_eq!(A::iter().collect::>(), vec![(4, 40, 20), (3, 30, 20)]); + + A::insert(3, 30, 10); + A::insert(4, 40, 10); + assert_eq!(A::iter().collect::>(), vec![(4, 40, 10), (3, 30, 10)]); + assert_eq!(A::drain().collect::>(), vec![(4, 40, 10), (3, 30, 10)]); + assert_eq!(A::iter().collect::>(), vec![]); + + C::insert(3, 30, 10); + C::insert(4, 40, 10); + A::translate::(|k1, k2, v| Some((k1 * k2 as u16 * v as u16).into())); + assert_eq!(A::iter().collect::>(), vec![(4, 40, 1600), (3, 30, 900)]); + + assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); + assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); + assert_eq!(A::HASHER1, frame_metadata::StorageHasher::Blake2_128Concat); + assert_eq!(A::HASHER2, frame_metadata::StorageHasher::Twox64Concat); + assert_eq!( + AValueQueryWithAnOnEmpty::HASHER1, + frame_metadata::StorageHasher::Blake2_128Concat + ); + assert_eq!( + AValueQueryWithAnOnEmpty::HASHER2, + frame_metadata::StorageHasher::Twox64Concat + ); + assert_eq!(A::NAME, "foo"); + assert_eq!(AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), 97u32.encode()); + assert_eq!(A::DEFAULT.0.default_byte(), Option::::None.encode()); + + WithLen::remove_all(); + assert_eq!(WithLen::decode_len(3, 30), None); + WithLen::append(0, 100, 10); + assert_eq!(WithLen::decode_len(0, 100), Some(1)); + + A::insert(3, 30, 11); + A::insert(3, 31, 12); + A::insert(4, 40, 13); + A::insert(4, 41, 14); + assert_eq!(A::iter_prefix_values(3).collect::>(), vec![12, 11]); + assert_eq!(A::iter_prefix(3).collect::>(), vec![(31, 12), (30, 11)]); + assert_eq!(A::iter_prefix_values(4).collect::>(), vec![13, 14]); + assert_eq!(A::iter_prefix(4).collect::>(), vec![(40, 13), (41, 14)]); + + A::remove_prefix(3); + assert_eq!(A::iter_prefix(3).collect::>(), vec![]); + assert_eq!(A::iter_prefix(4).collect::>(), vec![(40, 
13), (41, 14)]); + + assert_eq!(A::drain_prefix(4).collect::>(), vec![(40, 13), (41, 14)]); + assert_eq!(A::iter_prefix(4).collect::>(), vec![]); + assert_eq!(A::drain_prefix(4).collect::>(), vec![]); + }) + } +} diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs new file mode 100644 index 0000000000000..4af28a77cf2b6 --- /dev/null +++ b/frame/support/src/storage/types/map.rs @@ -0,0 +1,488 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage map type. Implements StorageMap, StorageIterableMap, StoragePrefixedMap traits and their +//! methods directly. + +use codec::{FullCodec, Decode, EncodeLike, Encode}; +use crate::{ + storage::{ + StorageAppend, StorageDecodeLength, + types::{OptionQuery, QueryKindTrait, OnEmptyGetter}, + }, + traits::{GetDefault, StorageInstance}, +}; +use frame_metadata::{DefaultByteGetter, StorageEntryModifier}; +use sp_std::prelude::*; + +/// A type that allow to store value for given key. Allowing to insert/remove/iterate on values. +/// +/// Each value is stored at: +/// ```nocompile +/// Twox128(Prefix::pallet_prefix()) +/// ++ Twox128(Prefix::STORAGE_PREFIX) +/// ++ Hasher1(encode(key)) +/// ``` +/// +/// # Warning +/// +/// If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` such as +/// `blake2_128_concat` must be used. Otherwise, other values in storage can be compromised. +pub struct StorageMap( + core::marker::PhantomData<(Prefix, Hasher, Key, Value, QueryKind, OnEmpty)> +); + +impl + crate::storage::generator::StorageMap + for StorageMap +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + type Query = QueryKind::Query; + type Hasher = Hasher; + fn module_prefix() -> &'static [u8] { + Prefix::pallet_prefix().as_bytes() + } + fn storage_prefix() -> &'static [u8] { + Prefix::STORAGE_PREFIX.as_bytes() + } + fn from_optional_value_to_query(v: Option) -> Self::Query { + QueryKind::from_optional_value_to_query(v) + } + fn from_query_to_optional_value(v: Self::Query) -> Option { + QueryKind::from_query_to_optional_value(v) + } +} + +impl crate::storage::StoragePrefixedMap for + StorageMap +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + fn module_prefix() -> &'static [u8] { + >::module_prefix() + } + fn storage_prefix() -> &'static [u8] { + >::storage_prefix() + } +} + +impl + StorageMap +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + /// Get the storage key used to fetch a value corresponding to a specific key. 
+ pub fn hashed_key_for>(key: KeyArg) -> Vec { + >::hashed_key_for(key) + } + + /// Does the value (explicitly) exist in storage? + pub fn contains_key>(key: KeyArg) -> bool { + >::contains_key(key) + } + + /// Load the value associated with the given key from the map. + pub fn get>(key: KeyArg) -> QueryKind::Query { + >::get(key) + } + + /// Try to get the value for the given key from the map. + /// + /// Returns `Ok` if it exists, `Err` if not. + pub fn try_get>(key: KeyArg) -> Result { + >::try_get(key) + } + + /// Swap the values of two keys. + pub fn swap, KeyArg2: EncodeLike>(key1: KeyArg1, key2: KeyArg2) { + >::swap(key1, key2) + } + + /// Store a value to be associated with the given key from the map. + pub fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg) { + >::insert(key, val) + } + + /// Remove the value under a key. + pub fn remove>(key: KeyArg) { + >::remove(key) + } + + /// Mutate the value under a key. + pub fn mutate, R, F: FnOnce(&mut QueryKind::Query) -> R>( + key: KeyArg, + f: F + ) -> R { + >::mutate(key, f) + } + + /// Mutate the item, only if an `Ok` value is returned. + pub fn try_mutate(key: KeyArg, f: F) -> Result + where + KeyArg: EncodeLike, + F: FnOnce(&mut QueryKind::Query) -> Result, + { + >::try_mutate(key, f) + } + + /// Mutate the value under a key. Deletes the item if mutated to a `None`. + pub fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( + key: KeyArg, + f: F + ) -> R { + >::mutate_exists(key, f) + } + + /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. + pub fn try_mutate_exists(key: KeyArg, f: F) -> Result + where + KeyArg: EncodeLike, + F: FnOnce(&mut Option) -> Result, + { + >::try_mutate_exists(key, f) + } + + /// Take the value under a key. + pub fn take>(key: KeyArg) -> QueryKind::Query { + >::take(key) + } + + /// Append the given items to the value in the storage. + /// + /// `Value` is required to implement `codec::EncodeAppend`. + /// + /// # Warning + /// + /// If the storage item is not encoded properly, the storage will be overwritten + /// and set to `[item]`. Any default value set for the storage item will be ignored + /// on overwrite. + pub fn append(key: EncodeLikeKey, item: EncodeLikeItem) + where + EncodeLikeKey: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageAppend + { + >::append(key, item) + } + + /// Read the length of the storage value without decoding the entire value under the + /// given `key`. + /// + /// `Value` is required to implement [`StorageDecodeLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + pub fn decode_len>(key: KeyArg) -> Option + where Value: StorageDecodeLength, + { + >::decode_len(key) + } + + /// Migrate an item with the given `key` from a defunct `OldHasher` to the current hasher. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. + pub fn migrate_key>( + key: KeyArg + ) -> Option { + >::migrate_key::(key) + } + + /// Remove all value of the storage. + pub fn remove_all() { + >::remove_all() + } + + /// Iter over all value of the storage. + /// + /// NOTE: If a value failed to decode becaues storage is corrupted then it is skipped. 
+ pub fn iter_values() -> crate::storage::PrefixIterator { + >::iter_values() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. + /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade. + pub fn translate_values Option>(f: F) { + >::translate_values(f) + } +} + +impl + StorageMap +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher + crate::ReversibleStorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + /// Enumerate all elements in the map in no particular order. + /// + /// If you alter the map while doing this, you'll get undefined results. + pub fn iter() -> crate::storage::PrefixIterator<(Key, Value)> { + >::iter() + } + + /// Remove all elements from the map and iterate through them in no particular order. + /// + /// If you add elements to the map while doing this, you'll get undefined results. + pub fn drain() -> crate::storage::PrefixIterator<(Key, Value)> { + >::drain() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + pub fn translate Option>(f: F) { + >::translate(f) + } +} + +/// Part of storage metadata for a storage map. +/// +/// NOTE: Generic hasher is supported. 
+pub trait StorageMapMetadata { + const MODIFIER: StorageEntryModifier; + const NAME: &'static str; + const DEFAULT: DefaultByteGetter; + const HASHER: frame_metadata::StorageHasher; +} + +impl StorageMapMetadata + for StorageMap where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + const MODIFIER: StorageEntryModifier = QueryKind::METADATA; + const HASHER: frame_metadata::StorageHasher = Hasher::METADATA; + const NAME: &'static str = Prefix::STORAGE_PREFIX; + const DEFAULT: DefaultByteGetter = + DefaultByteGetter(&OnEmptyGetter::(core::marker::PhantomData)); +} + +#[cfg(test)] +mod test { + use super::*; + use sp_io::{TestExternalities, hashing::twox_128}; + use crate::hash::*; + use crate::storage::types::ValueQuery; + use frame_metadata::StorageEntryModifier; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { "test" } + const STORAGE_PREFIX: &'static str = "foo"; + } + + struct ADefault; + impl crate::traits::Get for ADefault { + fn get() -> u32 { + 97 + } + } + + #[test] + fn test() { + type A = StorageMap; + type AValueQueryWithAnOnEmpty = StorageMap< + Prefix, Blake2_128Concat, u16, u32, ValueQuery, ADefault + >; + type B = StorageMap; + type C = StorageMap; + type WithLen = StorageMap>; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"foo")); + k.extend(&3u16.blake2_128_concat()); + assert_eq!(A::hashed_key_for(3).to_vec(), k); + + assert_eq!(A::contains_key(3), false); + assert_eq!(A::get(3), None); + assert_eq!(AValueQueryWithAnOnEmpty::get(3), 97); + + A::insert(3, 10); + assert_eq!(A::contains_key(3), true); + assert_eq!(A::get(3), Some(10)); + assert_eq!(A::try_get(3), Ok(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get(3), 10); + + A::swap(3, 2); + assert_eq!(A::contains_key(3), false); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(3), None); + assert_eq!(A::try_get(3), Err(())); + assert_eq!(AValueQueryWithAnOnEmpty::get(3), 97); + assert_eq!(A::get(2), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get(2), 10); + + A::remove(2); + assert_eq!(A::contains_key(2), false); + assert_eq!(A::get(2), None); + + AValueQueryWithAnOnEmpty::mutate(2, |v| *v = *v * 2); + AValueQueryWithAnOnEmpty::mutate(2, |v| *v = *v * 2); + assert_eq!(AValueQueryWithAnOnEmpty::contains_key(2), true); + assert_eq!(AValueQueryWithAnOnEmpty::get(2), 97 * 4); + + A::remove(2); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, |v| { + *v = *v * 2; Ok(()) + }); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, |v| { + *v = *v * 2; Ok(()) + }); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(97 * 4)); + + A::remove(2); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, |v| { + *v = *v * 2; Err(()) + }); + assert_eq!(A::contains_key(2), false); + + A::remove(2); + AValueQueryWithAnOnEmpty::mutate_exists(2, |v| { + assert!(v.is_none()); + *v = Some(10); + }); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(10)); + AValueQueryWithAnOnEmpty::mutate_exists(2, |v| { + *v = Some(v.unwrap() * 10); + }); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(100)); + + A::remove(2); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, |v| { + assert!(v.is_none()); + *v = Some(10); + Ok(()) + }); + assert_eq!(A::contains_key(2), 
true); + assert_eq!(A::get(2), Some(10)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, |v| { + *v = Some(v.unwrap() * 10); + Ok(()) + }); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(100)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, |v| { + *v = Some(v.unwrap() * 10); + Err(()) + }); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(100)); + + + A::insert(2, 10); + assert_eq!(A::take(2), Some(10)); + assert_eq!(A::contains_key(2), false); + assert_eq!(AValueQueryWithAnOnEmpty::take(2), 97); + assert_eq!(A::contains_key(2), false); + + B::insert(2, 10); + assert_eq!(A::migrate_key::(2), Some(10)); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(10)); + + A::insert(3, 10); + A::insert(4, 10); + A::remove_all(); + assert_eq!(A::contains_key(3), false); + assert_eq!(A::contains_key(4), false); + + A::insert(3, 10); + A::insert(4, 10); + assert_eq!(A::iter_values().collect::>(), vec![10, 10]); + + C::insert(3, 10); + C::insert(4, 10); + A::translate_values::(|v| Some((v * 2).into())); + assert_eq!(A::iter().collect::>(), vec![(4, 20), (3, 20)]); + + A::insert(3, 10); + A::insert(4, 10); + assert_eq!(A::iter().collect::>(), vec![(4, 10), (3, 10)]); + assert_eq!(A::drain().collect::>(), vec![(4, 10), (3, 10)]); + assert_eq!(A::iter().collect::>(), vec![]); + + C::insert(3, 10); + C::insert(4, 10); + A::translate::(|k, v| Some((k * v as u16).into())); + assert_eq!(A::iter().collect::>(), vec![(4, 40), (3, 30)]); + + assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); + assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); + assert_eq!(A::HASHER, frame_metadata::StorageHasher::Blake2_128Concat); + assert_eq!( + AValueQueryWithAnOnEmpty::HASHER, + frame_metadata::StorageHasher::Blake2_128Concat + ); + assert_eq!(A::NAME, "foo"); + assert_eq!(AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), 97u32.encode()); + assert_eq!(A::DEFAULT.0.default_byte(), Option::::None.encode()); + + WithLen::remove_all(); + assert_eq!(WithLen::decode_len(3), None); + WithLen::append(0, 10); + assert_eq!(WithLen::decode_len(0), Some(1)); + }) + } +} diff --git a/frame/support/src/storage/types/mod.rs b/frame/support/src/storage/types/mod.rs new file mode 100644 index 0000000000000..5bb6684b79259 --- /dev/null +++ b/frame/support/src/storage/types/mod.rs @@ -0,0 +1,108 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage types to build abstraction on storage, they implements storage traits such as +//! StorageMap and others. 
+ +use codec::FullCodec; +use frame_metadata::{DefaultByte, StorageEntryModifier}; + +mod value; +mod map; +mod double_map; + +pub use value::{StorageValue, StorageValueMetadata}; +pub use map::{StorageMap, StorageMapMetadata}; +pub use double_map::{StorageDoubleMap, StorageDoubleMapMetadata}; + +/// Trait implementing how the storage optional value is converted into the queried type. +/// +/// It is implemented by: +/// * `OptionQuery` which convert an optional value to an optional value, user when querying +/// storage will get an optional value. +/// * `ValueQuery` which convert an optional value to a value, user when querying storage will get +/// a value. +pub trait QueryKindTrait { + /// Metadata for the storage kind. + const METADATA: StorageEntryModifier; + + /// Type returned on query + type Query: FullCodec + 'static; + + /// Convert an optional value (i.e. some if trie contains the value or none otherwise) to the + /// query. + fn from_optional_value_to_query(v: Option) -> Self::Query; + + /// Convert a query to an optional value. + fn from_query_to_optional_value(v: Self::Query) -> Option; +} + +/// Implement QueryKindTrait with query being `Option` +/// +/// NOTE: it doesn't support a generic `OnEmpty`. This means only `None` can be +/// returned when no value is found. To use another `OnEmpty` implementation, `ValueQuery` can be +/// used instead. +pub struct OptionQuery; +impl QueryKindTrait for OptionQuery +where + Value: FullCodec + 'static, +{ + const METADATA: StorageEntryModifier = StorageEntryModifier::Optional; + + type Query = Option; + + fn from_optional_value_to_query(v: Option) -> Self::Query { + // NOTE: OnEmpty is fixed to GetDefault, thus it returns `None` on no value. + v + } + + fn from_query_to_optional_value(v: Self::Query) -> Option { + v + } +} + +/// Implement QueryKindTrait with query being `Value` +pub struct ValueQuery; +impl QueryKindTrait for ValueQuery +where + Value: FullCodec + 'static, + OnEmpty: crate::traits::Get, +{ + const METADATA: StorageEntryModifier = StorageEntryModifier::Default; + + type Query = Value; + + fn from_optional_value_to_query(v: Option) -> Self::Query { + v.unwrap_or_else(|| OnEmpty::get()) + } + + fn from_query_to_optional_value(v: Self::Query) -> Option { + Some(v) + } +} + +/// A helper struct which implements DefaultByte using `Get` and encode it. +struct OnEmptyGetter(core::marker::PhantomData<(Value, OnEmpty)>); +impl> DefaultByte + for OnEmptyGetter +{ + fn default_byte(&self) -> sp_std::vec::Vec { + OnEmpty::get().encode() + } +} +unsafe impl > Send for OnEmptyGetter {} +unsafe impl > Sync for OnEmptyGetter {} diff --git a/frame/support/src/storage/types/value.rs b/frame/support/src/storage/types/value.rs new file mode 100644 index 0000000000000..39f718956eb64 --- /dev/null +++ b/frame/support/src/storage/types/value.rs @@ -0,0 +1,280 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage value type. Implements StorageValue trait and its method directly. + +use codec::{FullCodec, Decode, EncodeLike, Encode}; +use crate::{ + storage::{ + StorageAppend, StorageDecodeLength, + types::{OptionQuery, QueryKindTrait, OnEmptyGetter}, + }, + traits::{GetDefault, StorageInstance}, +}; +use frame_metadata::{DefaultByteGetter, StorageEntryModifier}; + +/// A type that allow to store a value. +/// +/// Each value is stored at: +/// ```nocompile +/// Twox128(Prefix::pallet_prefix()) ++ Twox128(Prefix::STORAGE_PREFIX) +/// ``` +pub struct StorageValue( + core::marker::PhantomData<(Prefix, Value, QueryKind, OnEmpty)> +); + +impl crate::storage::generator::StorageValue for + StorageValue +where + Prefix: StorageInstance, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + type Query = QueryKind::Query; + fn module_prefix() -> &'static [u8] { + Prefix::pallet_prefix().as_bytes() + } + fn storage_prefix() -> &'static [u8] { + Prefix::STORAGE_PREFIX.as_bytes() + } + fn from_optional_value_to_query(v: Option) -> Self::Query { + QueryKind::from_optional_value_to_query(v) + } + fn from_query_to_optional_value(v: Self::Query) -> Option { + QueryKind::from_query_to_optional_value(v) + } +} + +impl StorageValue +where + Prefix: StorageInstance, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + /// Get the storage key. + pub fn hashed_key() -> [u8; 32] { >::hashed_key() } + + /// Does the value (explicitly) exist in storage? + pub fn exists() -> bool { >::exists() } + + /// Load the value from the provided storage instance. + pub fn get() -> QueryKind::Query { >::get() } + + /// Try to get the underlying value from the provided storage instance; `Ok` if it exists, + /// `Err` if not. + pub fn try_get() -> Result { + >::try_get() + } + + /// Translate a value from some previous type (`O`) to the current type. + /// + /// `f: F` is the translation function. + /// + /// Returns `Err` if the storage item could not be interpreted as the old type, and Ok, along + /// with the new value if it could. + /// + /// NOTE: This operates from and to `Option<_>` types; no effort is made to respect the default + /// value of the original type. + /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. + /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade, + /// while ensuring **no usage of this storage are made before the call to + /// `on_runtime_upgrade`**. (More precisely prior initialized modules doesn't make use of this + /// storage). + pub fn translate) -> Option>( + f: F, + ) -> Result, ()> { + >::translate(f) + } + + /// Store a value under this key into the provided storage instance. + pub fn put>(val: Arg) { + >::put(val) + } + + /// Store a value under this key into the provided storage instance. + /// + /// this uses the query type rather than the underlying value. + pub fn set(val: QueryKind::Query) { >::set(val) } + + /// Mutate the value + pub fn mutate R>(f: F) -> R { + >::mutate(f) + } + + /// Mutate the value if closure returns `Ok` + pub fn try_mutate Result>( + f: F, + ) -> Result { + >::try_mutate(f) + } + + /// Clear the storage value. 
+ pub fn kill() { >::kill() } + + /// Take a value from storage, removing it afterwards. + pub fn take() -> QueryKind::Query { >::take() } + + /// Append the given item to the value in the storage. + /// + /// `Value` is required to implement [`StorageAppend`]. + /// + /// # Warning + /// + /// If the storage item is not encoded properly, the storage item will be overwritten + /// and set to `[item]`. Any default value set for the storage item will be ignored + /// on overwrite. + pub fn append(item: EncodeLikeItem) + where + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageAppend + { + >::append(item) + } + + /// Read the length of the storage value without decoding the entire value. + /// + /// `Value` is required to implement [`StorageDecodeLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + pub fn decode_len() -> Option where Value: StorageDecodeLength { + >::decode_len() + } +} + +/// Part of storage metadata for storage value. +pub trait StorageValueMetadata { + const MODIFIER: StorageEntryModifier; + const NAME: &'static str; + const DEFAULT: DefaultByteGetter; +} + +impl StorageValueMetadata + for StorageValue where + Prefix: StorageInstance, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + const MODIFIER: StorageEntryModifier = QueryKind::METADATA; + const NAME: &'static str = Prefix::STORAGE_PREFIX; + const DEFAULT: DefaultByteGetter = + DefaultByteGetter(&OnEmptyGetter::(core::marker::PhantomData)); +} + +#[cfg(test)] +mod test { + use super::*; + use sp_io::{TestExternalities, hashing::twox_128}; + use crate::storage::types::ValueQuery; + use frame_metadata::StorageEntryModifier; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { "test" } + const STORAGE_PREFIX: &'static str = "foo"; + } + + struct ADefault; + impl crate::traits::Get for ADefault { + fn get() -> u32 { + 97 + } + } + + #[test] + fn test() { + type A = StorageValue; + type AValueQueryWithAnOnEmpty = StorageValue; + type B = StorageValue; + type WithLen = StorageValue>; + + TestExternalities::default().execute_with(|| { + assert_eq!(A::hashed_key().to_vec(), [twox_128(b"test"), twox_128(b"foo")].concat()); + assert_eq!(A::exists(), false); + assert_eq!(A::get(), None); + assert_eq!(AValueQueryWithAnOnEmpty::get(), 97); + assert_eq!(A::try_get(), Err(())); + + A::put(2); + assert_eq!(A::exists(), true); + assert_eq!(A::get(), Some(2)); + assert_eq!(AValueQueryWithAnOnEmpty::get(), 2); + assert_eq!(A::try_get(), Ok(2)); + assert_eq!(A::try_get(), Ok(2)); + + B::put(4); + A::translate::(|v| v.map(Into::into)).unwrap(); + assert_eq!(A::try_get(), Ok(4)); + + A::set(None); + assert_eq!(A::try_get(), Err(())); + + A::set(Some(2)); + assert_eq!(A::try_get(), Ok(2)); + + A::mutate(|v| *v = Some(v.unwrap() * 2)); + assert_eq!(A::try_get(), Ok(4)); + + A::set(Some(4)); + let _: Result<(), ()> = A::try_mutate(|v| { *v = Some(v.unwrap() * 2); Ok(()) }); + assert_eq!(A::try_get(), Ok(8)); + + let _: Result<(), ()> = A::try_mutate(|v| { *v = Some(v.unwrap() * 2); Err(()) }); + assert_eq!(A::try_get(), Ok(8)); + + A::kill(); + AValueQueryWithAnOnEmpty::mutate(|v| *v = *v * 2); + assert_eq!(AValueQueryWithAnOnEmpty::try_get(), Ok(97 * 2)); + + AValueQueryWithAnOnEmpty::kill(); + let 
_: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(|v| { + *v = *v * 2; Ok(()) + }); + assert_eq!(AValueQueryWithAnOnEmpty::try_get(), Ok(97 * 2)); + + A::kill(); + assert_eq!(A::try_get(), Err(())); + + assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); + assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); + assert_eq!(A::NAME, "foo"); + assert_eq!(A::DEFAULT.0.default_byte(), Option::::None.encode()); + assert_eq!(AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), 97u32.encode()); + + WithLen::kill(); + assert_eq!(WithLen::decode_len(), None); + WithLen::append(3); + assert_eq!(WithLen::decode_len(), Some(1)); + }); + } +} diff --git a/frame/support/src/storage/unhashed.rs b/frame/support/src/storage/unhashed.rs index 42f21cab4993a..8ac4240a9f0e9 100644 --- a/frame/support/src/storage/unhashed.rs +++ b/frame/support/src/storage/unhashed.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 1fadb079e5a5c..106ec10c6c4ee 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,13 +23,15 @@ use sp_std::{prelude::*, result, marker::PhantomData, ops::Div, fmt::Debug}; use codec::{FullCodec, Codec, Encode, Decode, EncodeLike}; use sp_core::u32_trait::Value as U32; use sp_runtime::{ - RuntimeDebug, ConsensusEngineId, DispatchResult, DispatchError, + RuntimeAppPublic, RuntimeDebug, BoundToRuntimeAppPublic, + ConsensusEngineId, DispatchResult, DispatchError, traits::{ - MaybeSerializeDeserialize, AtLeast32Bit, Saturating, TrailingZeroInput, Bounded, Zero, - BadOrigin, AtLeast32BitUnsigned, UniqueSaturatedFrom, UniqueSaturatedInto, - SaturatedConversion, + MaybeSerializeDeserialize, AtLeast32Bit, Saturating, TrailingZeroInput, Bounded, Zero, + BadOrigin, AtLeast32BitUnsigned, Convert, UniqueSaturatedFrom, UniqueSaturatedInto, + SaturatedConversion, StoredMapError, }, }; +use sp_staking::SessionIndex; use crate::dispatch::Parameter; use crate::storage::StorageMap; use crate::weights::Weight; @@ -40,6 +42,67 @@ use impl_trait_for_tuples::impl_for_tuples; #[doc(hidden)] pub use sp_std::{mem::{swap, take}, cell::RefCell, vec::Vec, boxed::Box}; +/// A trait for online node inspection in a session. +/// +/// Something that can give information about the current validator set. +pub trait ValidatorSet { + /// Type for representing validator id in a session. + type ValidatorId: Parameter; + /// A type for converting `AccountId` to `ValidatorId`. + type ValidatorIdOf: Convert>; + + /// Returns current session index. + fn session_index() -> SessionIndex; + + /// Returns the active set of validators. + fn validators() -> Vec; +} + +/// [`ValidatorSet`] combined with an identification. +pub trait ValidatorSetWithIdentification: ValidatorSet { + /// Full identification of `ValidatorId`. + type Identification: Parameter; + /// A type for converting `ValidatorId` to `Identification`. + type IdentificationOf: Convert>; +} + +/// A session handler for specific key type. 
+pub trait OneSessionHandler: BoundToRuntimeAppPublic { + /// The key type expected. + type Key: Decode + Default + RuntimeAppPublic; + + /// The given validator set will be used for the genesis session. + /// It is guaranteed that the given validator set will also be used + /// for the second session, therefore the first call to `on_new_session` + /// should provide the same validator set. + fn on_genesis_session<'a, I: 'a>(validators: I) + where I: Iterator, ValidatorId: 'a; + + /// Session set has changed; act appropriately. Note that this can be called + /// before initialization of your module. + /// + /// `changed` is true when at least one of the session keys + /// or the underlying economic identities/distribution behind one the + /// session keys has changed, false otherwise. + /// + /// The `validators` are the validators of the incoming session, and `queued_validators` + /// will follow. + fn on_new_session<'a, I: 'a>( + changed: bool, + validators: I, + queued_validators: I, + ) where I: Iterator, ValidatorId: 'a; + + /// A notification for end of the session. + /// + /// Note it is triggered before any `SessionManager::end_session` handlers, + /// so we can still affect the validator set. + fn on_before_session_ending() {} + + /// A validator got disabled. Act accordingly until a new session begins. + fn on_disabled(_validator_index: usize); +} + /// Simple trait for providing a filter over a reference to some type. pub trait Filter { /// Determine if a given value should be allowed through the filter (returns `true`) or not. @@ -300,42 +363,61 @@ mod test_impl_filter_stack { /// An abstraction of a value stored within storage, but possibly as part of a larger composite /// item. -pub trait StoredMap { +pub trait StoredMap { /// Get the item, or its default if it doesn't yet exist; we make no distinction between the /// two. fn get(k: &K) -> T; - /// Get whether the item takes up any storage. If this is `false`, then `get` will certainly - /// return the `T::default()`. If `true`, then there is no implication for `get` (i.e. it - /// may return any value, including the default). - /// - /// NOTE: This may still be `true`, even after `remove` is called. This is the case where - /// a single storage entry is shared between multiple `StoredMap` items single, without - /// additional logic to enforce it, deletion of any one them doesn't automatically imply - /// deletion of them all. - fn is_explicit(k: &K) -> bool; - /// Mutate the item. - fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> R; - /// Mutate the item, removing or resetting to default value if it has been mutated to `None`. - fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> R; + /// Maybe mutate the item only if an `Ok` value is returned from `f`. Do nothing if an `Err` is /// returned. It is removed or reset to default value if it has been mutated to `None` - fn try_mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> Result) -> Result; + fn try_mutate_exists>( + k: &K, + f: impl FnOnce(&mut Option) -> Result, + ) -> Result; + + // Everything past here has a default implementation. + + /// Mutate the item. + fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> Result { + Self::mutate_exists(k, |maybe_account| match maybe_account { + Some(ref mut account) => f(account), + x @ None => { + let mut account = Default::default(); + let r = f(&mut account); + *x = Some(account); + r + } + }) + } + + /// Mutate the item, removing or resetting to default value if it has been mutated to `None`. 
+ /// + /// This is infallible as long as the value does not get destroyed. + fn mutate_exists( + k: &K, + f: impl FnOnce(&mut Option) -> R, + ) -> Result { + Self::try_mutate_exists(k, |x| -> Result { Ok(f(x)) }) + } + /// Set the item to something new. - fn insert(k: &K, t: T) { Self::mutate(k, |i| *i = t); } + fn insert(k: &K, t: T) -> Result<(), StoredMapError> { Self::mutate(k, |i| *i = t) } + /// Remove the item or otherwise replace it with its default value; we don't care which. - fn remove(k: &K); + fn remove(k: &K) -> Result<(), StoredMapError> { Self::mutate_exists(k, |x| *x = None) } } /// A simple, generic one-parameter event notifier/handler. -pub trait Happened { - /// The thing happened. - fn happened(t: &T); -} +pub trait HandleLifetime { + /// An account was created. + fn created(_t: &T) -> Result<(), StoredMapError> { Ok(()) } -impl Happened for () { - fn happened(_: &T) {} + /// An account was killed. + fn killed(_t: &T) -> Result<(), StoredMapError> { Ok(()) } } +impl HandleLifetime for () {} + /// A shim for placing around a storage item in order to use it as a `StoredValue`. Ideally this /// wouldn't be needed as `StorageValue`s should blanket implement `StoredValue`s, however this /// would break the ability to have custom impls of `StoredValue`. The other workaround is to @@ -347,68 +429,63 @@ impl Happened for () { /// be the default value), or where the account is being removed or reset back to the default value /// where previously it did exist (though may have been in a default state). This works well with /// system module's `CallOnCreatedAccount` and `CallKillAccount`. -pub struct StorageMapShim< - S, - Created, - Removed, - K, - T ->(sp_std::marker::PhantomData<(S, Created, Removed, K, T)>); +pub struct StorageMapShim(sp_std::marker::PhantomData<(S, L, K, T)>); impl< S: StorageMap, - Created: Happened, - Removed: Happened, + L: HandleLifetime, K: FullCodec, - T: FullCodec, -> StoredMap for StorageMapShim { + T: FullCodec + Default, +> StoredMap for StorageMapShim { fn get(k: &K) -> T { S::get(k) } - fn is_explicit(k: &K) -> bool { S::contains_key(k) } - fn insert(k: &K, t: T) { - let existed = S::contains_key(&k); - S::insert(k, t); - if !existed { - Created::happened(k); + fn insert(k: &K, t: T) -> Result<(), StoredMapError> { + if !S::contains_key(&k) { + L::created(k)?; } + S::insert(k, t); + Ok(()) } - fn remove(k: &K) { - let existed = S::contains_key(&k); - S::remove(k); - if existed { - Removed::happened(&k); + fn remove(k: &K) -> Result<(), StoredMapError> { + if S::contains_key(&k) { + L::killed(&k)?; + S::remove(k); } + Ok(()) } - fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> R { - let existed = S::contains_key(&k); - let r = S::mutate(k, f); - if !existed { - Created::happened(k); + fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> Result { + if !S::contains_key(&k) { + L::created(k)?; } - r + Ok(S::mutate(k, f)) } - fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> R { - let (existed, exists, r) = S::mutate_exists(k, |maybe_value| { + fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> Result { + S::try_mutate_exists(k, |maybe_value| { let existed = maybe_value.is_some(); let r = f(maybe_value); - (existed, maybe_value.is_some(), r) - }); - if !existed && exists { - Created::happened(k); - } else if existed && !exists { - Removed::happened(k); - } - r + let exists = maybe_value.is_some(); + + if !existed && exists { + L::created(k)?; + } else if existed && !exists { + L::killed(k)?; + } + Ok(r) + }) } - fn 
try_mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> Result) -> Result { + fn try_mutate_exists>( + k: &K, + f: impl FnOnce(&mut Option) -> Result, + ) -> Result { S::try_mutate_exists(k, |maybe_value| { let existed = maybe_value.is_some(); - f(maybe_value).map(|v| (existed, maybe_value.is_some(), v)) - }).map(|(existed, exists, v)| { + let r = f(maybe_value)?; + let exists = maybe_value.is_some(); + if !existed && exists { - Created::happened(k); + L::created(k).map_err(E::from)?; } else if existed && !exists { - Removed::happened(k); + L::killed(k).map_err(E::from)?; } - v + Ok(r) }) } } @@ -507,18 +584,6 @@ pub trait ContainsLengthBound { fn max_len() -> usize; } -/// Determiner to say whether a given account is unused. -pub trait IsDeadAccount { - /// Is the given account dead? - fn is_dead_account(who: &AccountId) -> bool; -} - -impl IsDeadAccount for () { - fn is_dead_account(_who: &AccountId) -> bool { - true - } -} - /// Handler for when a new account has been created. #[impl_for_tuples(30)] pub trait OnNewAccount { @@ -1261,16 +1326,16 @@ pub trait ChangeMembers { /// /// This resets any previous value of prime. fn set_members_sorted(new_members: &[AccountId], old_members: &[AccountId]) { - let (incoming, outgoing) = Self::compute_members_diff(new_members, old_members); + let (incoming, outgoing) = Self::compute_members_diff_sorted(new_members, old_members); Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members); } - /// Compute diff between new and old members; they **must already be sorted**. + /// Compute diff between new and old members; they **must already be sorted**. /// /// Returns incoming and outgoing members. - fn compute_members_diff( + fn compute_members_diff_sorted( new_members: &[AccountId], - old_members: &[AccountId] + old_members: &[AccountId], ) -> (Vec, Vec) { let mut old_iter = old_members.iter(); let mut new_iter = new_members.iter(); @@ -1304,6 +1369,11 @@ pub trait ChangeMembers { /// Set the prime member. fn set_prime(_prime: Option) {} + + /// Get the current prime. + fn get_prime() -> Option { + None + } } impl ChangeMembers for () { @@ -1389,11 +1459,6 @@ pub trait PalletInfo { fn name() -> Option<&'static str>; } -impl PalletInfo for () { - fn index() -> Option { Some(0) } - fn name() -> Option<&'static str> { Some("test") } -} - /// The function and pallet name of the Call. #[derive(Clone, Eq, PartialEq, Default, RuntimeDebug)] pub struct CallMetadata { @@ -1427,6 +1492,9 @@ pub trait GetCallMetadata { #[impl_for_tuples(30)] pub trait OnFinalize { /// The block is being finalized. Implement to have something happen. + /// + /// NOTE: This function is called AFTER ALL extrinsics in a block are applied, + /// including inherent extrinsics. fn on_finalize(_n: BlockNumber) {} } @@ -1438,6 +1506,10 @@ pub trait OnInitialize { /// The block is being initialized. Implement to have something happen. /// /// Return the non-negotiable weight consumed in the block. + /// + /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, + /// including inherent extrinsics. Hence for instance, if you runtime includes + /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } } @@ -1569,7 +1641,7 @@ pub mod schedule { /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed /// only if it is executed *before* the currently scheduled block. 
For periodic tasks, /// this dispatch is guaranteed to succeed only before the *initial* execution; for - /// others, use `reschedule_named`. + /// others, use `reschedule_named`. /// /// Will return an error if the `address` is invalid. fn reschedule( @@ -1646,16 +1718,16 @@ pub trait EnsureOrigin { /// Implemented for pallet dispatchable type by `decl_module` and for runtime dispatchable by /// `construct_runtime` and `impl_outer_dispatch`. pub trait UnfilteredDispatchable { - /// The origin type of the runtime, (i.e. `frame_system::Trait::Origin`). + /// The origin type of the runtime, (i.e. `frame_system::Config::Origin`). type Origin; /// Dispatch this call but do not check the filter in origin. fn dispatch_bypass_filter(self, origin: Self::Origin) -> crate::dispatch::DispatchResultWithPostInfo; } -/// Methods available on `frame_system::Trait::Origin`. +/// Methods available on `frame_system::Config::Origin`. pub trait OriginTrait: Sized { - /// Runtime call type, as in `frame_system::Trait::Call` + /// Runtime call type, as in `frame_system::Config::Call` type Call; /// The caller origin, overarching type of all pallets origins. @@ -1667,7 +1739,7 @@ pub trait OriginTrait: Sized { /// Add a filter to the origin. fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static); - /// Reset origin filters to default one, i.e `frame_system::Trait::BaseCallFilter`. + /// Reset origin filters to default one, i.e `frame_system::Config::BaseCallFilter`. fn reset_filter(&mut self); /// Replace the caller with caller from the other origin @@ -1679,13 +1751,13 @@ pub trait OriginTrait: Sized { /// Get the caller. fn caller(&self) -> &Self::PalletsOrigin; - /// Create with system none origin and `frame-system::Trait::BaseCallFilter`. + /// Create with system none origin and `frame-system::Config::BaseCallFilter`. fn none() -> Self; /// Create with system root origin and no filter. fn root() -> Self; - /// Create with system signed origin and `frame-system::Trait::BaseCallFilter`. + /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. fn signed(by: Self::AccountId) -> Self; } @@ -1724,6 +1796,30 @@ pub trait Instance: 'static { const PREFIX: &'static str; } +/// An instance of a storage in a pallet. +/// +/// Define an instance for an individual storage inside a pallet. +/// The pallet prefix is used to isolate the storage between pallets, and the storage prefix is +/// used to isolate storages inside a pallet. +/// +/// NOTE: These information can be used to define storages in pallet such as a `StorageMap` which +/// can use keys after `twox_128(pallet_prefix())++twox_128(STORAGE_PREFIX)` +pub trait StorageInstance { + /// Prefix of a pallet to isolate it from other pallets. + fn pallet_prefix() -> &'static str; + + /// Prefix given to a storage to isolate from other storages in the pallet. + const STORAGE_PREFIX: &'static str; +} + +/// Implement Get by returning Default for any type that implements Default. +pub struct GetDefault; +impl crate::traits::Get for GetDefault { + fn get() -> T { + T::default() + } +} + /// A trait similar to `Convert` to convert values from `B` an abstract balance type /// into u64 and back from u128. (This conversion is used in election and other places where complex /// calculation over balance type is needed) @@ -1838,6 +1934,79 @@ pub trait IsSubType { fn is_sub_type(&self) -> Option<&T>; } +/// The pallet hooks trait. Implementing this lets you express some logic to execute. 
+pub trait Hooks { + /// The block is being finalized. Implement to have something happen. + fn on_finalize(_n: BlockNumber) {} + + /// The block is being initialized. Implement to have something happen. + /// + /// Return the non-negotiable weight consumed in the block. + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } + + /// Perform a module upgrade. + /// + /// NOTE: this doesn't include all pallet logic triggered on runtime upgrade. For instance it + /// doesn't include the write of the pallet version in storage. The final complete logic + /// triggered on runtime upgrade is given by implementation of `OnRuntimeUpgrade` trait by + /// `Pallet`. + /// + /// # Warning + /// + /// This function will be called before we initialized any runtime state, aka `on_initialize` + /// wasn't called yet. So, information like the block number and any other + /// block local data are not accessible. + /// + /// Return the non-negotiable weight consumed for runtime upgrade. + fn on_runtime_upgrade() -> crate::weights::Weight { 0 } + + /// Implementing this function on a module allows you to perform long-running tasks + /// that make (by default) validators generate transactions that feed results + /// of those long-running computations back on chain. + /// + /// NOTE: This function runs off-chain, so it can access the block state, + /// but cannot preform any alterations. More specifically alterations are + /// not forbidden, but they are not persisted in any way after the worker + /// has finished. + /// + /// This function is being called after every block import (when fully synced). + /// + /// Implement this and use any of the `Offchain` `sp_io` set of APIs + /// to perform off-chain computations, calls and submit transactions + /// with results to trigger any on-chain changes. + /// Any state alterations are lost and are not persisted. + fn offchain_worker(_n: BlockNumber) {} + + /// Run integrity test. + /// + /// The test is not executed in a externalities provided environment. + fn integrity_test() {} +} + +/// A trait to define the build function of a genesis config, T and I are placeholder for pallet +/// trait and pallet instance. +#[cfg(feature = "std")] +pub trait GenesisBuild: Default + MaybeSerializeDeserialize { + /// The build function is called within an externalities allowing storage APIs. + /// Thus one can write to storage using regular pallet storages. + fn build(&self); + + /// Build the storage using `build` inside default storage. + fn build_storage(&self) -> Result { + let mut storage = Default::default(); + self.assimilate_storage(&mut storage)?; + Ok(storage) + } + + /// Assimilate the storage for this module into pre-existing overlays. + fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { + sp_state_machine::BasicExternalities::execute_with_storage(storage, || { + self.build(); + Ok(()) + }) + } +} + /// The storage key postfix that is used to store the [`PalletVersion`] per pallet. /// /// The full storage key is built by using: @@ -1870,7 +2039,7 @@ impl PalletVersion { /// Returns the storage key for a pallet version. /// - /// See [`PALLET_VERSION_STORAGE_KEY_POSTIFX`] on how this key is built. + /// See [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] on how this key is built. /// /// Returns `None` if the given `PI` returned a `None` as name for the given /// `Pallet`. 
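The `StorageInstance` trait introduced above is what ties the new `StorageValue`/`StorageMap`/`StorageDoubleMap` types to concrete key prefixes. A minimal usage sketch, assuming the module paths shown; `TestPrefix`, `MyMap` and `example` are purely illustrative names and are not part of this change:

use frame_support::storage::types::{StorageMap, ValueQuery};
use frame_support::traits::{GetDefault, StorageInstance};
use frame_support::hash::Blake2_128Concat;

// Illustrative prefix: isolates this storage as pallet "MyPallet", storage "MyMap".
struct TestPrefix;
impl StorageInstance for TestPrefix {
    fn pallet_prefix() -> &'static str { "MyPallet" }
    const STORAGE_PREFIX: &'static str = "MyMap";
}

// Per the docs above, each value is stored under:
// twox_128("MyPallet") ++ twox_128("MyMap") ++ blake2_128_concat(encode(key))
type MyMap = StorageMap<TestPrefix, Blake2_128Concat, u32, u64, ValueQuery, GetDefault>;

fn example() {
    MyMap::insert(1u32, 2u64);
    assert_eq!(MyMap::get(1u32), 2);
    // With `ValueQuery` and `GetDefault` as `OnEmpty`, a missing key yields
    // `u64::default()` (i.e. 0) rather than `None`.
    assert_eq!(MyMap::get(99u32), 0);
}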
diff --git a/frame/support/src/unsigned.rs b/frame/support/src/unsigned.rs index 16c434fe638bc..71ae31d95d198 100644 --- a/frame/support/src/unsigned.rs +++ b/frame/support/src/unsigned.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 74f0773aa541f..abd54994bc9e8 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,7 +24,7 @@ //! - [`ClassifyDispatch`]: class of the dispatch. //! - [`PaysFee`]: weather this weight should be translated to fee and deducted upon dispatch. //! -//! Substrate then bundles then output information of the two traits into [`DispatchInfo`] struct +//! Substrate then bundles the output information of the three traits into [`DispatchInfo`] struct //! and provides it by implementing the [`GetDispatchInfo`] for all `Call` both inner and outer call //! types. //! @@ -39,9 +39,9 @@ //! `Yes`**. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 1000] //! fn dispatching(origin) { unimplemented!() } //! } @@ -52,10 +52,10 @@ //! 2.1 Define weight and class, **in which case `PaysFee` would be `Yes`**. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::DispatchClass; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = (1000, DispatchClass::Operational)] //! fn dispatching(origin) { unimplemented!() } //! } @@ -66,10 +66,10 @@ //! 2.2 Define weight and `PaysFee`, **in which case `ClassifyDispatch` would be `Normal`**. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::Pays; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = (1000, Pays::No)] //! fn dispatching(origin) { unimplemented!() } //! } @@ -80,10 +80,10 @@ //! 3. Define all 3 parameters. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::{DispatchClass, Pays}; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = (1000, DispatchClass::Operational, Pays::No)] //! fn dispatching(origin) { unimplemented!() } //! } @@ -91,19 +91,20 @@ //! # fn main() {} //! ``` //! -//! ### 2. Define weights as a function of input arguments using `FunctionOf` tuple struct. This struct works -//! in a similar manner as above. 3 items must be provided and each can be either a fixed value or a -//! function/closure with the same parameters list as the dispatchable function itself, wrapper in a -//! tuple. +//! 
### 2. Define weights as a function of input arguments using `FunctionOf` tuple struct. +//! +//! This struct works in a similar manner as above. 3 items must be provided and each can be either +//! a fixed value or a function/closure with the same parameters list as the dispatchable function +//! itself, wrapper in a tuple. //! //! Using this only makes sense if you want to use a function for at least one of the elements. If //! all 3 are static values, providing a raw tuple is easier. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::{DispatchClass, FunctionOf, Pays}; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = FunctionOf( //! // weight, function. //! |args: (&u32, &u64)| *args.0 as u64 + args.1, @@ -213,6 +214,9 @@ impl Default for Pays { } /// A generalized group of dispatch types. +/// +/// NOTE whenever upgrading the enum make sure to also update +/// [DispatchClass::all] and [DispatchClass::non_mandatory] helper functions. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] @@ -242,6 +246,39 @@ impl Default for DispatchClass { } } +impl DispatchClass { + /// Returns an array containing all dispatch classes. + pub fn all() -> &'static [DispatchClass] { + &[DispatchClass::Normal, DispatchClass::Operational, DispatchClass::Mandatory] + } + + /// Returns an array of all dispatch classes except `Mandatory`. + pub fn non_mandatory() -> &'static [DispatchClass] { + &[DispatchClass::Normal, DispatchClass::Operational] + } +} + +/// A trait that represents one or many values of given type. +/// +/// Useful to accept as parameter type to let the caller pass either a single value directly +/// or an iterator. +pub trait OneOrMany { + /// The iterator type. + type Iter: Iterator; + /// Convert this item into an iterator. + fn into_iter(self) -> Self::Iter; +} + +impl OneOrMany for DispatchClass { + type Iter = sp_std::iter::Once; + fn into_iter(self) -> Self::Iter { sp_std::iter::once(self) } +} + +impl<'a> OneOrMany for &'a [DispatchClass] { + type Iter = sp_std::iter::Cloned>; + fn into_iter(self) -> Self::Iter { self.iter().cloned() } +} + /// Primitives related to priority management of Frame. pub mod priority { /// The starting point of all Operational transactions. 3/4 of u64::max_value(). @@ -695,13 +732,94 @@ impl WeightToFeePolynomial for IdentityFee where } } +/// A struct holding value for each `DispatchClass`. +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] +pub struct PerDispatchClass { + /// Value for `Normal` extrinsics. + normal: T, + /// Value for `Operational` extrinsics. + operational: T, + /// Value for `Mandatory` extrinsics. + mandatory: T, +} + +impl PerDispatchClass { + /// Create new `PerDispatchClass` with the same value for every class. + pub fn new(val: impl Fn(DispatchClass) -> T) -> Self { + Self { + normal: val(DispatchClass::Normal), + operational: val(DispatchClass::Operational), + mandatory: val(DispatchClass::Mandatory), + } + } + + /// Get a mutable reference to current value of given class. 
+ pub fn get_mut(&mut self, class: DispatchClass) -> &mut T { + match class { + DispatchClass::Operational => &mut self.operational, + DispatchClass::Normal => &mut self.normal, + DispatchClass::Mandatory => &mut self.mandatory, + } + } + + /// Get current value for given class. + pub fn get(&self, class: DispatchClass) -> &T { + match class { + DispatchClass::Normal => &self.normal, + DispatchClass::Operational => &self.operational, + DispatchClass::Mandatory => &self.mandatory, + } + } +} + +impl PerDispatchClass { + /// Set the value of given class. + pub fn set(&mut self, new: T, class: impl OneOrMany) { + for class in class.into_iter() { + *self.get_mut(class) = new.clone(); + } + } +} + +impl PerDispatchClass { + /// Returns the total weight consumed by all extrinsics in the block. + pub fn total(&self) -> Weight { + let mut sum = 0; + for class in DispatchClass::all() { + sum = sum.saturating_add(*self.get(*class)); + } + sum + } + + /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. + pub fn add(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_add(weight); + } + + /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would + /// occur. + pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { + let value = self.get_mut(class); + *value = value.checked_add(weight).ok_or(())?; + Ok(()) + } + + /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of + /// `Weight`. + pub fn sub(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_sub(weight); + } +} + #[cfg(test)] #[allow(dead_code)] mod tests { use crate::{decl_module, parameter_types, traits::Get}; use super::*; - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type Balance; type BlockNumber; @@ -718,16 +836,16 @@ mod tests { }; } - impl Trait for TraitImpl { + impl Config for TraitImpl { type Origin = u32; type BlockNumber = u32; type Balance = u32; type DbWeight = DbWeight; - type PalletInfo = (); + type PalletInfo = crate::tests::PanicPalletInfo; } decl_module! 
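A usage sketch of the `PerDispatchClass` and `OneOrMany` helpers introduced in `weights.rs` above (not part of the committed diff); the variable and function names below are illustrative only.

use frame_support::weights::{DispatchClass, PerDispatchClass, Weight};

fn per_dispatch_class_sketch() {
    // Start with zero weight recorded for every dispatch class.
    let mut consumed = PerDispatchClass::<Weight>::new(|_class| 0);

    // Saturating and checked accumulation per class.
    consumed.add(100, DispatchClass::Normal);
    consumed.checked_add(50, DispatchClass::Operational).expect("no overflow");
    assert_eq!(consumed.total(), 150);

    // `set` takes either a single class or a slice of classes, thanks to `OneOrMany`.
    consumed.set(0, DispatchClass::non_mandatory());
    assert_eq!(consumed.total(), 0);
}

The `OneOrMany` indirection is what lets `set` be called with `DispatchClass::Normal` and with `DispatchClass::all()` alike.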
{ - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { // no arguments, fixed weight #[weight = 1000] fn f00(_origin) { unimplemented!(); } @@ -749,7 +867,7 @@ mod tests { fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] - fn f2(_origin) { unimplemented!(); } + fn f20(_origin) { unimplemented!(); } #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] fn f21(_origin) { unimplemented!(); } @@ -783,13 +901,29 @@ mod tests { assert_eq!(info.class, DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::No); - assert_eq!(Call::::f11(10, 20).get_dispatch_info().weight, 120); - assert_eq!(Call::::f11(10, 20).get_dispatch_info().class, DispatchClass::Normal); - assert_eq!(Call::::f12(10, 20).get_dispatch_info().weight, 0); - assert_eq!(Call::::f12(10, 20).get_dispatch_info().class, DispatchClass::Operational); - assert_eq!(Call::::f2().get_dispatch_info().weight, 12300); - assert_eq!(Call::::f21().get_dispatch_info().weight, 45600); - assert_eq!(Call::::f2().get_dispatch_info().class, DispatchClass::Normal); + // #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, Pays::Yes)] + let info = Call::::f11(13, 20).get_dispatch_info(); + assert_eq!(info.weight, 150); // 13*10 + 20 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = (0, DispatchClass::Operational, Pays::Yes)] + let info = Call::::f12(10, 20).get_dispatch_info(); + assert_eq!(info.weight, 0); + assert_eq!(info.class, DispatchClass::Operational); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] + let info = Call::::f20().get_dispatch_info(); + assert_eq!(info.weight, 12300); // 100*3 + 1000*2 + 10_1000 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] + let info = Call::::f21().get_dispatch_info(); + assert_eq!(info.weight, 45600); // 100*6 + 1000*5 + 40_1000 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); } #[test] @@ -821,7 +955,7 @@ mod tests { type Balance = u64; - // 0.5x^3 + 2.333x2 + 7x - 10_000 + // 0.5x^3 + 2.333x^2 + 7x - 10_000 struct Poly; impl WeightToFeePolynomial for Poly { type Balance = Balance; @@ -858,13 +992,16 @@ mod tests { #[test] fn polynomial_works() { + // 100^3/2=500000 100^2*(2+1/3)=23333 700 -10000 assert_eq!(Poly::calc(&100), 514033); + // 10123^3/2=518677865433 10123^2*(2+1/3)=239108634 70861 -10000 assert_eq!(Poly::calc(&10_123), 518917034928); } #[test] fn polynomial_does_not_underflow() { assert_eq!(Poly::calc(&0), 0); + assert_eq!(Poly::calc(&10), 0); } #[test] diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index ee8ace5c983c5..67cf668f7f4c9 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-test" -version = "2.0.0" +version = "2.0.1" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,18 +13,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-io = { version = "2.0.0", path = 
"../../../primitives/io", default-features = false } -sp-state-machine = { version = "0.8.0", optional = true, path = "../../../primitives/state-machine" } -frame-support = { version = "2.0.0", default-features = false, path = "../" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../../primitives/inherents" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -trybuild = "1.0.33" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-io = { version = "3.0.0", path = "../../../primitives/io", default-features = false } +sp-state-machine = { version = "0.9.0", optional = true, path = "../../../primitives/state-machine" } +frame-support = { version = "3.0.0", default-features = false, path = "../" } +sp-inherents = { version = "3.0.0", default-features = false, path = "../../../primitives/inherents" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } +trybuild = "1.0.38" pretty_assertions = "0.6.1" rustversion = "1.0.0" -frame-metadata = { version = "12.0.0", default-features = false, path = "../../metadata" } +frame-metadata = { version = "13.0.0", default-features = false, path = "../../metadata" } +frame-system = { version = "3.0.0", default-features = false, path = "../../system" } [features] default = ["std"] @@ -33,6 +34,7 @@ std = [ "codec/std", "sp-io/std", "frame-support/std", + "frame-system/std", "sp-inherents/std", "sp-core/std", "sp-std/std", diff --git a/frame/support/test/src/lib.rs b/frame/support/test/src/lib.rs index a917c781c065c..4b1510bf81f4d 100644 --- a/frame/support/test/src/lib.rs +++ b/frame/support/test/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,7 +26,7 @@ mod pallet_version; /// The configuration trait -pub trait Trait: 'static { +pub trait Config: 'static { /// The runtime origin type. type Origin: codec::Codec + codec::EncodeLike + Default; /// The block number type. @@ -39,5 +39,17 @@ pub trait Trait: 'static { frame_support::decl_module! { /// Some test module - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} +} + +/// A PalletInfo implementation which just panics. +pub struct PanicPalletInfo; + +impl frame_support::traits::PalletInfo for PanicPalletInfo { + fn index() -> Option { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } + fn name() -> Option<&'static str> { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } } diff --git a/frame/support/test/src/pallet_version.rs b/frame/support/test/src/pallet_version.rs index 5912bd5b8e47f..aaa46c3ef2c60 100644 --- a/frame/support/test/src/pallet_version.rs +++ b/frame/support/test/src/pallet_version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 4ff4fc6828604..8dc44c2024adc 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #![recursion_limit="128"] -use sp_runtime::{generic, traits::{BlakeTwo256, Block as _, Verify}, DispatchError}; +use sp_runtime::{generic, traits::{BlakeTwo256, Verify}, DispatchError}; use sp_core::{H256, sr25519}; use sp_std::cell::RefCell; use frame_support::traits::PalletInfo as _; @@ -37,11 +37,11 @@ thread_local! { mod module1 { use super::*; - pub trait Trait: system::Trait {} + pub trait Config: system::Config {} frame_support::decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call - where origin: ::Origin, system=system + pub struct Module, I: Instance = DefaultInstance> for enum Call + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -55,31 +55,31 @@ mod module1 { frame_support::decl_event! { pub enum Event where - ::AccountId + ::AccountId { A(AccountId), } } frame_support::decl_error! { - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { Something } } frame_support::decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Module {} + trait Store for Module, I: Instance=DefaultInstance> as Module {} } } mod module2 { use super::*; - pub trait Trait: system::Trait {} + pub trait Config: system::Config {} frame_support::decl_module! { - pub struct Module for enum Call - where origin: ::Origin, system=system + pub struct Module for enum Call + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -102,25 +102,25 @@ mod module2 { } frame_support::decl_error! { - pub enum Error for Module { + pub enum Error for Module { Something } } frame_support::decl_storage! { - trait Store for Module as Module {} + trait Store for Module as Module {} } } -impl module1::Trait for Runtime {} -impl module2::Trait for Runtime {} +impl module1::Config for Runtime {} +impl module2::Config for Runtime {} pub type Signature = sr25519::Signature; pub type AccountId = ::Signer; pub type BlockNumber = u64; pub type Index = u64; -impl system::Trait for Runtime { +impl system::Config for Runtime { type BaseCallFilter = (); type Hash = H256; type Origin = Origin; diff --git a/frame/support/test/tests/construct_runtime_ui.rs b/frame/support/test/tests/construct_runtime_ui.rs index e1624c76830ae..a55e800628582 100644 --- a/frame/support/test/tests/construct_runtime_ui.rs +++ b/frame/support/test/tests/construct_runtime_ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ use std::env; #[test] fn ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/construct_runtime_ui/*.rs"); diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs new file mode 100644 index 0000000000000..bc242a57a41e5 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs @@ -0,0 +1,15 @@ +use frame_support::construct_runtime; + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Module}, + Balance: balances::{Module}, + Balance: balances::{Module}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr new file mode 100644 index 0000000000000..f5b999db66a41 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr @@ -0,0 +1,11 @@ +error: Two modules with the same name! + --> $DIR/conflicting_module_name.rs:10:3 + | +10 | Balance: balances::{Module}, + | ^^^^^^^ + +error: Two modules with the same name! + --> $DIR/conflicting_module_name.rs:11:3 + | +11 | Balance: balances::{Module}, + | ^^^^^^^ diff --git a/frame/support/test/tests/decl_module_ui.rs b/frame/support/test/tests/decl_module_ui.rs index 7df64bc52f412..2c097bb6e1332 100644 --- a/frame/support/test/tests/decl_module_ui.rs +++ b/frame/support/test/tests/decl_module_ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #[test] fn decl_module_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/decl_module_ui/*.rs"); diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs index 56eff29c5dc1b..cc7c1ff219d8b 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs @@ -1,5 +1,5 @@ frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { fn integrity_test() {} fn integrity_test() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index 25f3b891d9b47..3bf5f58b43a39 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -2,7 +2,7 @@ error: `integrity_test` can only be passed once as input. --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn integrity_test() {} 4 | | 5 | | fn integrity_test() {} @@ -16,7 +16,7 @@ error[E0601]: `main` function not found in crate `$CRATE` --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn integrity_test() {} 4 | | 5 | | fn integrity_test() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs index 3e1bc25c8d59c..ddde7c72c1cc5 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs @@ -1,5 +1,5 @@ frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { fn on_initialize() -> Weight { 0 } diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr index 34c5ff3f941a1..2911d7ded8a23 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr @@ -2,7 +2,7 @@ error: `on_initialize` can only be passed once as input. --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn on_initialize() -> Weight { 4 | | 0 ... | @@ -16,7 +16,7 @@ error[E0601]: `main` function not found in crate `$CRATE` --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn on_initialize() -> Weight { 4 | | 0 ... | diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index 8d5727ce9104b..a2690b1379db5 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,13 +24,13 @@ mod tests { use std::marker::PhantomData; frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_storage! { - trait Store for Module as TestStorage { + trait Store for Module as TestStorage { // non-getters: pub / $default /// Hello, this is doc! @@ -81,14 +81,14 @@ mod tests { struct TraitImpl {} - impl frame_support_test::Trait for TraitImpl { + impl frame_support_test::Config for TraitImpl { type Origin = u32; type BlockNumber = u32; - type PalletInfo = (); + type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); } - impl Trait for TraitImpl {} + impl Config for TraitImpl {} const EXPECTED_METADATA: StorageMetadata = StorageMetadata { prefix: DecodeDifferent::Encode("TestStorage"), @@ -414,16 +414,16 @@ mod tests { #[cfg(test)] #[allow(dead_code)] mod test2 { - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } type PairOf = (T, T); frame_support::decl_storage! { - trait Store for Module as TestStorage { + trait Store for Module as TestStorage { SingleDef : u32; PairDef : PairOf; Single : Option; @@ -438,26 +438,26 @@ mod test2 { struct TraitImpl {} - impl frame_support_test::Trait for TraitImpl { + impl frame_support_test::Config for TraitImpl { type Origin = u32; type BlockNumber = u32; - type PalletInfo = (); + type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); } - impl Trait for TraitImpl {} + impl Config for TraitImpl {} } #[cfg(test)] #[allow(dead_code)] mod test3 { - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { Foo get(fn foo) config(initial_foo): u32; } } @@ -466,14 +466,14 @@ mod test3 { struct TraitImpl {} - impl frame_support_test::Trait for TraitImpl { + impl frame_support_test::Config for TraitImpl { type Origin = u32; type BlockNumber = u32; - type PalletInfo = (); + type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); } - impl Trait for TraitImpl {} + impl Config for TraitImpl {} } #[cfg(test)] @@ -482,17 +482,17 @@ mod test_append_and_len { use sp_io::TestExternalities; use codec::{Encode, Decode}; - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] struct NoDef(u32); frame_support::decl_storage! 
{ - trait Store for Module as Test { + trait Store for Module as Test { NoDefault: Option; JustVec: Vec; @@ -511,14 +511,14 @@ mod test_append_and_len { struct Test {} - impl frame_support_test::Trait for Test { + impl frame_support_test::Config for Test { type Origin = u32; type BlockNumber = u32; - type PalletInfo = (); + type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); } - impl Trait for Test {} + impl Config for Test {} #[test] fn default_for_option() { diff --git a/frame/support/test/tests/decl_storage_ui.rs b/frame/support/test/tests/decl_storage_ui.rs index 56529d62c28ff..99d2da87aca28 100644 --- a/frame/support/test/tests/decl_storage_ui.rs +++ b/frame/support/test/tests/decl_storage_ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #[test] fn decl_storage_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/decl_storage_ui/*.rs"); diff --git a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs index 58923ed19297c..17f80c8c84755 100644 --- a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub trait Trait: frame_support_test::Trait {} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ - trait Store for Module as FinalKeysNone { + trait Store for Module as FinalKeysNone { pub Value config(value): u32; pub Value2 config(value): u32; } diff --git a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs index e77dcea404ccb..fec6aeb64cec4 100644 --- a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub trait Trait: frame_support_test::Trait {} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ - trait Store for Module as FinalKeysNone { + trait Store for Module as FinalKeysNone { pub Value get(fn value) config(): u32; pub Value2 config(value): u32; } diff --git a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs index b6ccb7ebb7b7b..13c57a638bb18 100644 --- a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub trait Trait: frame_support_test::Trait {} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ - trait Store for Module as FinalKeysNone { + trait Store for Module as FinalKeysNone { pub Value get(fn value) config(): u32; pub Value2 get(fn value) config(): u32; } diff --git a/frame/support/test/tests/derive_no_bound.rs b/frame/support/test/tests/derive_no_bound.rs index 29f813c6498bb..b96fbcfba931d 100644 --- a/frame/support/test/tests/derive_no_bound.rs +++ b/frame/support/test/tests/derive_no_bound.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,19 +28,19 @@ fn runtime_debug_no_bound_display_correctly() { assert_eq!(format!("{:?}", Unnamed(1)), "Unnamed(1)"); } -trait Trait { +trait Config { type C: std::fmt::Debug + Clone + Eq + PartialEq; } struct Runtime; struct ImplNone; -impl Trait for Runtime { +impl Config for Runtime { type C = u32; } #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] -struct StructNamed { +struct StructNamed { a: u32, b: u64, c: T::C, @@ -77,7 +77,7 @@ fn test_struct_named() { } #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] -struct StructUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>); +struct StructUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>); #[test] fn test_struct_unnamed() { @@ -109,7 +109,7 @@ fn test_struct_unnamed() { } #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] -enum Enum { +enum Enum { VariantUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>), VariantNamed { a: u32, diff --git a/frame/support/test/tests/derive_no_bound_ui.rs b/frame/support/test/tests/derive_no_bound_ui.rs index da276018f7f8e..434671e19b105 100644 --- a/frame/support/test/tests/derive_no_bound_ui.rs +++ b/frame/support/test/tests/derive_no_bound_ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
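A minimal sketch of what the `*NoBound` derives exercised by `derive_no_bound.rs` above provide, assuming only the `frame_support` derive re-exports used in those tests; the names `Wrapper`, `Runtime` and `no_bound_sketch` are invented for illustration. The point is that the derived impls bound the field types (here `T::C`) rather than the generic parameter itself, so `Runtime` never has to implement `Clone`, `Debug`, `PartialEq` or `Eq`.

use frame_support::{CloneNoBound, DebugNoBound, EqNoBound, PartialEqNoBound};

trait Config {
    type C: Clone + Eq + PartialEq + core::fmt::Debug;
}

// Deliberately implements none of the traits being derived below.
struct Runtime;
impl Config for Runtime {
    type C = u32;
}

#[derive(CloneNoBound, DebugNoBound, EqNoBound, PartialEqNoBound)]
struct Wrapper<T: Config> {
    c: T::C,
}

fn no_bound_sketch() {
    let a = Wrapper::<Runtime> { c: 1 };
    let b = a.clone();                                   // only needs `T::C: Clone`
    assert!(a == b);                                     // PartialEqNoBound / EqNoBound
    assert_eq!(format!("{:?}", a), "Wrapper { c: 1 }");  // DebugNoBound
}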
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #[test] fn derive_no_bound_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/derive_no_bound_ui/*.rs"); diff --git a/frame/support/test/tests/derive_no_bound_ui/clone.rs b/frame/support/test/tests/derive_no_bound_ui/clone.rs index 6b80dcedc3880..2bc1cc492d171 100644 --- a/frame/support/test/tests/derive_no_bound_ui/clone.rs +++ b/frame/support/test/tests/derive_no_bound_ui/clone.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::CloneNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/clone.stderr b/frame/support/test/tests/derive_no_bound_ui/clone.stderr index 4b9cccf0b0fa1..4b253ad12451b 100644 --- a/frame/support/test/tests/derive_no_bound_ui/clone.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/clone.stderr @@ -1,7 +1,7 @@ -error[E0277]: the trait bound `::C: std::clone::Clone` is not satisfied +error[E0277]: the trait bound `::C: Clone` is not satisfied --> $DIR/clone.rs:7:2 | 7 | c: T::C, - | ^ the trait `std::clone::Clone` is not implemented for `::C` + | ^ the trait `Clone` is not implemented for `::C` | - = note: required by `std::clone::Clone::clone` + = note: required by `clone` diff --git a/frame/support/test/tests/derive_no_bound_ui/debug.rs b/frame/support/test/tests/derive_no_bound_ui/debug.rs index f2411da4b41bc..6016c3e6d98b8 100644 --- a/frame/support/test/tests/derive_no_bound_ui/debug.rs +++ b/frame/support/test/tests/derive_no_bound_ui/debug.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::DebugNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/debug.stderr b/frame/support/test/tests/derive_no_bound_ui/debug.stderr index 838bd7f68a65f..7580cab2ea0b3 100644 --- a/frame/support/test/tests/derive_no_bound_ui/debug.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/debug.stderr @@ -1,8 +1,8 @@ -error[E0277]: `::C` doesn't implement `std::fmt::Debug` +error[E0277]: `::C` doesn't implement `std::fmt::Debug` --> $DIR/debug.rs:7:2 | 7 | c: T::C, - | ^ `::C` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | ^ `::C` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | - = help: the trait `std::fmt::Debug` is not implemented for `::C` + = help: the trait `std::fmt::Debug` is not implemented for `::C` = note: required for the cast to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/derive_no_bound_ui/eq.rs b/frame/support/test/tests/derive_no_bound_ui/eq.rs index 9e4026734fbeb..a48452626368c 100644 --- a/frame/support/test/tests/derive_no_bound_ui/eq.rs +++ b/frame/support/test/tests/derive_no_bound_ui/eq.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::EqNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/eq.stderr b/frame/support/test/tests/derive_no_bound_ui/eq.stderr index 08341c4d65ab5..bbd907adecb33 100644 --- a/frame/support/test/tests/derive_no_bound_ui/eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/eq.stderr @@ -1,7 +1,7 @@ error[E0277]: can't compare `Foo` with `Foo` --> $DIR/eq.rs:6:8 
| -6 | struct Foo { +6 | struct Foo { | ^^^ no implementation for `Foo == Foo` | - = help: the trait `std::cmp::PartialEq` is not implemented for `Foo` + = help: the trait `PartialEq` is not implemented for `Foo` diff --git a/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs b/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs index 1720776a40029..7bd6b7ef6a2e3 100644 --- a/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs +++ b/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::PartialEqNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr b/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr index d85757c520aa1..64f844e547be0 100644 --- a/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr @@ -1,7 +1,7 @@ -error[E0369]: binary operation `==` cannot be applied to type `::C` +error[E0369]: binary operation `==` cannot be applied to type `::C` --> $DIR/partial_eq.rs:7:2 | 7 | c: T::C, | ^ | - = note: the trait `std::cmp::PartialEq` is not implemented for `::C` + = note: the trait `std::cmp::PartialEq` is not implemented for `::C` diff --git a/frame/support/test/tests/final_keys.rs b/frame/support/test/tests/final_keys.rs index 6bd1252825466..9839a3d3b2d94 100644 --- a/frame/support/test/tests/final_keys.rs +++ b/frame/support/test/tests/final_keys.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,14 +21,14 @@ use frame_support::{StorageDoubleMap, StorageMap, StorageValue, StoragePrefixedM use sp_io::{TestExternalities, hashing::{twox_64, twox_128, blake2_128}}; mod no_instance { - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ - trait Store for Module as FinalKeysNone { + trait Store for Module as FinalKeysNone { pub Value config(value): u32; pub Map: map hasher(blake2_128_concat) u32 => u32; @@ -45,15 +45,15 @@ mod no_instance { } mod instance { - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module, I: Instance = DefaultInstance> + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ - trait Store for Module, I: Instance = DefaultInstance> + trait Store for Module, I: Instance = DefaultInstance> as FinalKeysSome { pub Value config(value): u32; diff --git a/frame/support/test/tests/genesisconfig.rs b/frame/support/test/tests/genesisconfig.rs index f268f11a4dc15..a30b021d13e51 100644 --- a/frame/support/test/tests/genesisconfig.rs +++ b/frame/support/test/tests/genesisconfig.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
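For orientation, a short sketch (not part of the diff) of the final storage key layout that `final_keys.rs` above asserts for `decl_storage` items; it reuses the `FinalKeysNone` prefix and the `Value`/`Map` item names from that test, and the same `[twox_128, twox_128].concat()` pattern the new `pallet.rs` test uses further below.

use codec::Encode;
use sp_io::hashing::{blake2_128, twox_128};

fn final_key_sketch() {
    // Plain storage value: twox_128(storage prefix) ++ twox_128(item name).
    let value_key = [twox_128(b"FinalKeysNone"), twox_128(b"Value")].concat();
    assert_eq!(value_key.len(), 32);

    // `blake2_128_concat` map entry: the 32-byte prefix, then blake2_128 of the
    // SCALE-encoded key, then the encoded key itself.
    let mut map_key = [twox_128(b"FinalKeysNone"), twox_128(b"Map")].concat();
    let encoded_key = 1u32.encode();
    map_key.extend(blake2_128(&encoded_key).to_vec());
    map_key.extend(encoded_key);
    assert_eq!(map_key.len(), 32 + 16 + 4);
}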
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,28 +15,28 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub trait Trait: frame_support_test::Trait {} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { pub AppendableDM config(t): double_map hasher(identity) u32, hasher(identity) T::BlockNumber => Vec; } } struct Test; -impl frame_support_test::Trait for Test { +impl frame_support_test::Config for Test { type BlockNumber = u32; type Origin = (); - type PalletInfo = (); + type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); } -impl Trait for Test {} +impl Config for Test {} #[test] fn init_genesis_config() { diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 6c90767f92e57..f7d79b7d4bf6e 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #![recursion_limit="128"] use codec::{Codec, EncodeLike, Encode, Decode}; -use sp_runtime::{generic, BuildStorage, traits::{BlakeTwo256, Block as _, Verify}}; +use sp_runtime::{generic, BuildStorage, traits::{BlakeTwo256, Verify}}; use frame_support::{ Parameter, traits::Get, parameter_types, metadata::{ @@ -41,16 +41,16 @@ mod module1 { use super::*; use sp_std::ops::Add; - pub trait Trait: system::Trait where ::BlockNumber: From { - type Event: From> + Into<::Event>; + pub trait Config: system::Config where ::BlockNumber: From { + type Event: From> + Into<::Event>; type Origin: From>; type SomeParameter: Get; type GenericType: Default + Clone + Codec + EncodeLike; } frame_support::decl_module! { - pub struct Module, I: Instance> for enum Call where - origin: ::Origin, + pub struct Module, I: Instance> for enum Call where + origin: ::Origin, system = system, T::BlockNumber: From { @@ -67,7 +67,7 @@ mod module1 { } frame_support::decl_storage! { - trait Store for Module, I: Instance> as Module1 where + trait Store for Module, I: Instance> as Module1 where T::BlockNumber: From + std::fmt::Display { pub Value config(value): T::GenericType; @@ -83,7 +83,7 @@ mod module1 { } frame_support::decl_error! 
{ - pub enum Error for Module, I: Instance> where + pub enum Error for Module, I: Instance> where T::BlockNumber: From, T::BlockNumber: Add, T::AccountId: AsRef<[u8]>, @@ -101,14 +101,14 @@ mod module1 { } #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] - pub enum Origin, I> where T::BlockNumber: From { + pub enum Origin, I> where T::BlockNumber: From { Members(u32), _Phantom(std::marker::PhantomData<(T, I)>), } pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; - impl, I: Instance> ProvideInherent for Module where + impl, I: Instance> ProvideInherent for Module where T::BlockNumber: From { type Call = Call; @@ -131,17 +131,17 @@ mod module1 { mod module2 { use super::*; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Amount: Parameter + Default; - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; type Origin: From>; } - impl, I: Instance> Currency for Module {} + impl, I: Instance> Currency for Module {} frame_support::decl_module! { - pub struct Module, I: Instance=DefaultInstance> for enum Call where - origin: ::Origin, + pub struct Module, I: Instance=DefaultInstance> for enum Call where + origin: ::Origin, system = system { fn deposit_event() = default; @@ -149,7 +149,7 @@ mod module2 { } frame_support::decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Module2 { + trait Store for Module, I: Instance=DefaultInstance> as Module2 { pub Value config(value): T::Amount; pub Map config(map): map hasher(identity) u64 => u64; pub DoubleMap config(double_map): double_map hasher(identity) u64, hasher(identity) u64 => u64; @@ -157,20 +157,20 @@ mod module2 { } frame_support::decl_event! { - pub enum Event where Amount = >::Amount { + pub enum Event where Amount = >::Amount { Variant(Amount), } } #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] - pub enum Origin, I=DefaultInstance> { + pub enum Origin, I=DefaultInstance> { Members(u32), _Phantom(std::marker::PhantomData<(T, I)>), } pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; - impl, I: Instance> ProvideInherent for Module { + impl, I: Instance> ProvideInherent for Module { type Call = Call; type Error = MakeFatalError; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; @@ -190,13 +190,13 @@ mod module2 { mod module3 { use super::*; - pub trait Trait: module2::Trait + module2::Trait + system::Trait { + pub trait Config: module2::Config + module2::Config + system::Config { type Currency: Currency; type Currency2: Currency; } frame_support::decl_module! { - pub struct Module for enum Call where origin: ::Origin, system=system {} + pub struct Module for enum Call where origin: ::Origin, system=system {} } } @@ -204,39 +204,39 @@ parameter_types! 
{ pub const SomeValue: u32 = 100; } -impl module1::Trait for Runtime { +impl module1::Config for Runtime { type Event = Event; type Origin = Origin; type SomeParameter = SomeValue; type GenericType = u32; } -impl module1::Trait for Runtime { +impl module1::Config for Runtime { type Event = Event; type Origin = Origin; type SomeParameter = SomeValue; type GenericType = u32; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u16; type Event = Event; type Origin = Origin; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u32; type Event = Event; type Origin = Origin; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u32; type Event = Event; type Origin = Origin; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u64; type Event = Event; type Origin = Origin; } -impl module3::Trait for Runtime { +impl module3::Config for Runtime { type Currency = Module2_2; type Currency2 = Module2_3; } @@ -246,14 +246,14 @@ pub type AccountId = ::Signer; pub type BlockNumber = u64; pub type Index = u64; -impl system::Trait for Runtime { +impl system::Config for Runtime { type BaseCallFilter= (); type Hash = H256; type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; type Event = Event; - type PalletInfo = (); + type PalletInfo = PalletInfo; type Call = Call; type DbWeight = (); } diff --git a/frame/support/test/tests/issue2219.rs b/frame/support/test/tests/issue2219.rs index 596a3b6ffb25d..4eacca9daca01 100644 --- a/frame/support/test/tests/issue2219.rs +++ b/frame/support/test/tests/issue2219.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +16,7 @@ // limitations under the License. use frame_support::sp_runtime::generic; -use frame_support::sp_runtime::traits::{BlakeTwo256, Block as _, Verify}; +use frame_support::sp_runtime::traits::{BlakeTwo256, Verify}; use frame_support::codec::{Encode, Decode}; use sp_core::{H256, sr25519}; use serde::{Serialize, Deserialize}; @@ -27,9 +27,9 @@ mod module { use super::*; pub type Request = ( - ::AccountId, + ::AccountId, Role, - ::BlockNumber, + ::BlockNumber, ); pub type Requests = Vec>; @@ -39,7 +39,7 @@ mod module { } #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug)] - pub struct RoleParameters { + pub struct RoleParameters { // minimum actors to maintain - if role is unstaking // and remaining actors would be less that this value - prevent or punish for unstaking pub min_actors: u32, @@ -65,7 +65,7 @@ mod module { pub startup_grace_period: T::BlockNumber, } - impl Default for RoleParameters { + impl Default for RoleParameters { fn default() -> Self { Self { max_actors: 10, @@ -81,18 +81,18 @@ mod module { } } - pub trait Trait: system::Trait {} + pub trait Config: system::Config {} frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } #[derive(Encode, Decode, Copy, Clone, Serialize, Deserialize)] - pub struct Data { + pub struct Data { pub data: T::BlockNumber, } - impl Default for Data { + impl Default for Data { fn default() -> Self { Self { data: T::BlockNumber::default(), @@ -101,7 +101,7 @@ mod module { } frame_support::decl_storage! { - trait Store for Module as Actors { + trait Store for Module as Actors { /// requirements to enter and maintain status in roles pub Parameters get(fn parameters) build(|config: &GenesisConfig| { if config.enable_storage_role { @@ -157,19 +157,19 @@ pub type Header = generic::Header; pub type Block = generic::Block; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; -impl system::Trait for Runtime { +impl system::Config for Runtime { type BaseCallFilter = (); type Hash = H256; type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; type Event = Event; - type PalletInfo = (); + type PalletInfo = PalletInfo; type Call = Call; type DbWeight = (); } -impl module::Trait for Runtime {} +impl module::Config for Runtime {} frame_support::construct_runtime!( pub enum Runtime where diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs new file mode 100644 index 0000000000000..8e0bacb9aa4af --- /dev/null +++ b/frame/support/test/tests/pallet.rs @@ -0,0 +1,765 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
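The new test file that follows exercises the `#[frame_support::pallet]` attribute macro end to end. As a stripped-down orientation sketch (not part of the committed file), the macro's core surface looks roughly like this; the module name `minimal`, the call `noop` and the storage item `Something` are invented for the example.

#[frame_support::pallet]
pub mod minimal {
    use frame_support::pallet_prelude::*;
    use frame_system::pallet_prelude::*;

    // The pallet's configuration trait; associated types and `#[pallet::constant]`s go here.
    #[pallet::config]
    pub trait Config: frame_system::Config {}

    // The pallet type to which storage, hooks and calls are attached.
    #[pallet::pallet]
    pub struct Pallet<T>(_);

    // Lifecycle hooks; an empty impl keeps the defaults (no-op on_initialize etc.).
    #[pallet::hooks]
    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}

    // Dispatchable calls; every call needs an explicit weight annotation.
    #[pallet::call]
    impl<T: Config> Pallet<T> {
        #[pallet::weight(0)]
        fn noop(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
            let _ = origin;
            Ok(().into())
        }
    }

    // A storage value whose final key is twox_128(pallet name) ++ twox_128("Something"),
    // the scheme the `storage_expand` test below verifies for the real pallet.
    #[pallet::storage]
    pub type Something<T> = StorageValue<_, u32>;
}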
+ +use frame_support::{ + weights::{DispatchInfo, DispatchClass, Pays, GetDispatchInfo}, + traits::{ + GetCallName, OnInitialize, OnFinalize, OnRuntimeUpgrade, GetPalletVersion, OnGenesis, + }, + dispatch::{UnfilteredDispatchable, Parameter}, + storage::unhashed, +}; +use sp_runtime::DispatchError; +use sp_io::{TestExternalities, hashing::{twox_64, twox_128, blake2_128}}; + +pub struct SomeType1; +impl From for u64 { fn from(_t: SomeType1) -> Self { 0u64 } } + +pub struct SomeType2; +impl From for u64 { fn from(_t: SomeType2) -> Self { 100u64 } } + +pub struct SomeType3; +impl From for u64 { fn from(_t: SomeType3) -> Self { 0u64 } } + +pub struct SomeType4; +impl From for u64 { fn from(_t: SomeType4) -> Self { 0u64 } } + +pub struct SomeType5; +impl From for u64 { fn from(_t: SomeType5) -> Self { 0u64 } } + +pub struct SomeType6; +impl From for u64 { fn from(_t: SomeType6) -> Self { 0u64 } } + +pub struct SomeType7; +impl From for u64 { fn from(_t: SomeType7) -> Self { 0u64 } } + +pub trait SomeAssociation1 { type _1: Parameter; } +impl SomeAssociation1 for u64 { type _1 = u64; } + +pub trait SomeAssociation2 { type _2: Parameter; } +impl SomeAssociation2 for u64 { type _2 = u64; } + +#[frame_support::pallet] +pub mod pallet { + use super::{ + SomeType1, SomeType2, SomeType3, SomeType4, SomeType5, SomeType6, SomeType7, + SomeAssociation1, SomeAssociation2, + }; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + type BalanceOf = ::Balance; + + #[pallet::config] + pub trait Config: frame_system::Config + where ::AccountId: From + SomeAssociation1, + { + /// Some comment + /// Some comment + #[pallet::constant] + type MyGetParam: Get; + + /// Some comment + /// Some comment + #[pallet::constant] + type MyGetParam2: Get; + + #[pallet::constant] + type MyGetParam3: Get<::_1>; + + type Balance: Parameter + Default; + + type Event: From> + IsType<::Event>; + } + + #[pallet::extra_constants] + impl Pallet + where T::AccountId: From + SomeAssociation1 + From, + { + /// Some doc + /// Some doc + fn some_extra() -> T::AccountId { SomeType2.into() } + + /// Some doc + fn some_extra_extra() -> T::AccountId { SomeType1.into() } + } + + #[pallet::pallet] + #[pallet::generate_store(pub(crate) trait Store)] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet + where T::AccountId: From + From + SomeAssociation1, + { + fn on_initialize(_: BlockNumberFor) -> Weight { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType2); // Test for where clause + Self::deposit_event(Event::Something(10)); + 10 + } + fn on_finalize(_: BlockNumberFor) { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType2); // Test for where clause + Self::deposit_event(Event::Something(20)); + } + fn on_runtime_upgrade() -> Weight { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType2); // Test for where clause + Self::deposit_event(Event::Something(30)); + 30 + } + fn integrity_test() { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType2); // Test for where clause + } + } + + #[pallet::call] + impl Pallet + where T::AccountId: From + From + SomeAssociation1 + { + /// Doc comment put in metadata + #[pallet::weight(Weight::from(*_foo))] + fn foo( + origin: OriginFor, + #[pallet::compact] _foo: u32, + _bar: u32, + ) -> DispatchResultWithPostInfo { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType3); // Test for 
where clause + let _ = origin; + Self::deposit_event(Event::Something(3)); + Ok(().into()) + } + + /// Doc comment put in metadata + #[pallet::weight(1)] + #[frame_support::transactional] + fn foo_transactional( + _origin: OriginFor, + #[pallet::compact] foo: u32, + ) -> DispatchResultWithPostInfo { + Self::deposit_event(Event::Something(0)); + if foo == 0 { + Err(Error::::InsufficientProposersBalance)?; + } + + Ok(().into()) + } + } + + #[pallet::error] + pub enum Error { + /// doc comment put into metadata + InsufficientProposersBalance, + } + + #[pallet::event] + #[pallet::metadata(BalanceOf = "Balance", u32 = "Other")] + #[pallet::generate_deposit(fn deposit_event)] + pub enum Event where T::AccountId: SomeAssociation1 + From{ + /// doc comment put in metadata + Proposed(::AccountId), + /// doc + Spending(BalanceOf), + Something(u32), + SomethingElse(::_1), + } + + #[pallet::storage] + pub type ValueWhereClause where T::AccountId: SomeAssociation2 = + StorageValue<_, ::_2>; + + #[pallet::storage] + pub type Value = StorageValue<_, u32>; + + #[pallet::type_value] + pub fn MyDefault() -> u16 + where T::AccountId: From + From + SomeAssociation1 + { + T::AccountId::from(SomeType7); // Test where clause works + 4u16 + } + + #[pallet::storage] + pub type Map where T::AccountId: From = + StorageMap<_, Blake2_128Concat, u8, u16, ValueQuery, MyDefault>; + + #[pallet::storage] + pub type Map2 = StorageMap<_, Twox64Concat, u16, u32>; + + #[pallet::storage] + pub type DoubleMap = StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; + + #[pallet::storage] + pub type DoubleMap2 = StorageDoubleMap<_, Twox64Concat, u16, Blake2_128Concat, u32, u64>; + + #[pallet::genesis_config] + #[derive(Default)] + pub struct GenesisConfig { + _myfield: u32, + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig + where T::AccountId: From + SomeAssociation1 + From + { + fn build(&self) { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType4); // Test for where clause + } + } + + #[pallet::origin] + #[derive(EqNoBound, RuntimeDebugNoBound, CloneNoBound, PartialEqNoBound, Encode, Decode)] + pub struct Origin(PhantomData); + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet + where T::AccountId: From + SomeAssociation1 + From + From + { + type Call = Call; + fn validate_unsigned( + _source: TransactionSource, + _call: &Self::Call + ) -> TransactionValidity { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType5); // Test for where clause + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet + where T::AccountId: From + SomeAssociation1 + From + From + { + type Call = Call; + type Error = InherentError; + + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_data: &InherentData) -> Option { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType6); // Test for where clause + unimplemented!(); + } + } + + #[derive(codec::Encode, sp_runtime::RuntimeDebug)] + #[cfg_attr(feature = "std", derive(codec::Decode))] + pub enum InherentError { + } + + impl sp_inherents::IsFatalError for InherentError { + fn is_fatal_error(&self) -> bool { + unimplemented!(); + } + } + + pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; +} + +// Test that a pallet with non generic event and generic genesis_config is correctly handled 
+#[frame_support::pallet] +pub mod pallet2 { + use super::{SomeType1, SomeAssociation1}; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config + where ::AccountId: From + SomeAssociation1, + { + type Event: From + IsType<::Event>; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(crate) trait Store)] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet + where T::AccountId: From + SomeAssociation1, + { + } + + #[pallet::call] + impl Pallet + where T::AccountId: From + SomeAssociation1, + { + } + + #[pallet::event] + pub enum Event { + /// Something + Something(u32), + } + + #[pallet::genesis_config] + pub struct GenesisConfig + where T::AccountId: From + SomeAssociation1, + { + phantom: PhantomData, + } + + impl Default for GenesisConfig + where T::AccountId: From + SomeAssociation1, + { + fn default() -> Self { + GenesisConfig { + phantom: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig + where T::AccountId: From + SomeAssociation1, + { + fn build(&self) {} + } +} + +frame_support::parameter_types!( + pub const MyGetParam: u32= 10; + pub const MyGetParam2: u32= 11; + pub const MyGetParam3: u32= 12; + pub const BlockHashCount: u32 = 250; +); + +impl frame_system::Config for Runtime { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u32; + type Call = Call; + type Hash = sp_runtime::testing::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); +} +impl pallet::Config for Runtime { + type Event = Event; + type MyGetParam = MyGetParam; + type MyGetParam2 = MyGetParam2; + type MyGetParam3 = MyGetParam3; + type Balance = u64; +} + +impl pallet2::Config for Runtime { + type Event = Event; +} + +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system::{Module, Call, Event}, + Example: pallet::{Module, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned}, + Example2: pallet2::{Module, Call, Event, Config, Storage}, + } +); + +#[test] +fn transactional_works() { + TestExternalities::default().execute_with(|| { + frame_system::Pallet::::set_block_number(1); + + pallet::Call::::foo_transactional(0).dispatch_bypass_filter(None.into()) + .err().unwrap(); + assert!(frame_system::Pallet::::events().is_empty()); + + pallet::Call::::foo_transactional(1).dispatch_bypass_filter(None.into()).unwrap(); + assert_eq!( + frame_system::Pallet::::events().iter().map(|e| &e.event).collect::>(), + vec![&Event::pallet(pallet::Event::Something(0))], + ); + }) +} + +#[test] +fn call_expand() { + let call_foo = pallet::Call::::foo(3, 0); + assert_eq!( + call_foo.get_dispatch_info(), + DispatchInfo { + weight: 3, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + } + ); + assert_eq!(call_foo.get_call_name(), 
"foo"); + assert_eq!( + pallet::Call::::get_call_names(), + &["foo", "foo_transactional"], + ); +} + +#[test] +fn error_expand() { + assert_eq!( + format!("{:?}", pallet::Error::::InsufficientProposersBalance), + String::from("InsufficientProposersBalance"), + ); + assert_eq!( + <&'static str>::from(pallet::Error::::InsufficientProposersBalance), + "InsufficientProposersBalance", + ); + assert_eq!( + DispatchError::from(pallet::Error::::InsufficientProposersBalance), + DispatchError::Module { + index: 1, + error: 0, + message: Some("InsufficientProposersBalance"), + }, + ); +} + +#[test] +fn instance_expand() { + // Assert same type. + let _: pallet::__InherentHiddenInstance = (); +} + +#[test] +fn pallet_expand_deposit_event() { + TestExternalities::default().execute_with(|| { + frame_system::Pallet::::set_block_number(1); + pallet::Call::::foo(3, 0).dispatch_bypass_filter(None.into()).unwrap(); + assert_eq!( + frame_system::Pallet::::events()[0].event, + Event::pallet(pallet::Event::Something(3)), + ); + }) +} + +#[test] +fn storage_expand() { + use frame_support::pallet_prelude::*; + use frame_support::StoragePrefixedMap; + + fn twox_64_concat(d: &[u8]) -> Vec { + let mut v = twox_64(d).to_vec(); + v.extend_from_slice(d); + v + } + + fn blake2_128_concat(d: &[u8]) -> Vec { + let mut v = blake2_128(d).to_vec(); + v.extend_from_slice(d); + v + } + + TestExternalities::default().execute_with(|| { + pallet::Value::::put(1); + let k = [twox_128(b"Example"), twox_128(b"Value")].concat(); + assert_eq!(unhashed::get::(&k), Some(1u32)); + + pallet::Map::::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u16)); + assert_eq!(&k[..32], &>::final_prefix()); + + pallet::Map2::::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + pallet::DoubleMap::::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + pallet::DoubleMap2::::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u64)); + assert_eq!(&k[..32], &>::final_prefix()); + }) +} + +#[test] +fn pallet_hooks_expand() { + TestExternalities::default().execute_with(|| { + frame_system::Pallet::::set_block_number(1); + + assert_eq!(AllModules::on_initialize(1), 10); + AllModules::on_finalize(1); + + assert_eq!(pallet::Pallet::::storage_version(), None); + assert_eq!(AllModules::on_runtime_upgrade(), 30); + assert_eq!( + pallet::Pallet::::storage_version(), + Some(pallet::Pallet::::current_version()), + ); + + assert_eq!( + frame_system::Pallet::::events()[0].event, + Event::pallet(pallet::Event::Something(10)), + ); + assert_eq!( + frame_system::Pallet::::events()[1].event, + Event::pallet(pallet::Event::Something(20)), + ); + assert_eq!( + frame_system::Pallet::::events()[2].event, + Event::pallet(pallet::Event::Something(30)), + ); + }) +} + +#[test] +fn pallet_on_genesis() { + TestExternalities::default().execute_with(|| { + 
assert_eq!(pallet::Pallet::::storage_version(), None); + pallet::Pallet::::on_genesis(); + assert_eq!( + pallet::Pallet::::storage_version(), + Some(pallet::Pallet::::current_version()), + ); + }) +} + +#[test] +fn metadata() { + use frame_metadata::*; + use codec::{Decode, Encode}; + + let expected_pallet_metadata = ModuleMetadata { + index: 1, + name: DecodeDifferent::Decoded("Example".to_string()), + storage: Some(DecodeDifferent::Decoded(StorageMetadata { + prefix: DecodeDifferent::Decoded("Example".to_string()), + entries: DecodeDifferent::Decoded(vec![ + StorageEntryMetadata { + name: DecodeDifferent::Decoded("ValueWhereClause".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain( + DecodeDifferent::Decoded( + "::_2".to_string() + ), + ), + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Decoded("Value".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Decoded("u32".to_string())), + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Decoded("Map".to_string()), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + key: DecodeDifferent::Decoded("u8".to_string()), + value: DecodeDifferent::Decoded("u16".to_string()), + hasher: StorageHasher::Blake2_128Concat, + unused: false, + }, + default: DecodeDifferent::Decoded(vec![4, 0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Decoded("Map2".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: DecodeDifferent::Decoded("u16".to_string()), + value: DecodeDifferent::Decoded("u32".to_string()), + hasher: StorageHasher::Twox64Concat, + unused: false, + }, + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Decoded("DoubleMap".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::DoubleMap { + value: DecodeDifferent::Decoded("u32".to_string()), + key1: DecodeDifferent::Decoded("u8".to_string()), + key2: DecodeDifferent::Decoded("u16".to_string()), + hasher: StorageHasher::Blake2_128Concat, + key2_hasher: StorageHasher::Twox64Concat, + }, + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Decoded("DoubleMap2".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::DoubleMap { + value: DecodeDifferent::Decoded("u64".to_string()), + key1: DecodeDifferent::Decoded("u16".to_string()), + key2: DecodeDifferent::Decoded("u32".to_string()), + hasher: StorageHasher::Twox64Concat, + key2_hasher: StorageHasher::Blake2_128Concat, + }, + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + ]), + })), + calls: Some(DecodeDifferent::Decoded(vec![ + FunctionMetadata { + name: DecodeDifferent::Decoded("foo".to_string()), + arguments: DecodeDifferent::Decoded(vec![ + FunctionArgumentMetadata { + name: DecodeDifferent::Decoded("_foo".to_string()), + ty: DecodeDifferent::Decoded("Compact".to_string()), + }, + FunctionArgumentMetadata { + name: DecodeDifferent::Decoded("_bar".to_string()), + ty: DecodeDifferent::Decoded("u32".to_string()), + } + ]), + 
documentation: DecodeDifferent::Decoded(vec![ + " Doc comment put in metadata".to_string(), + ]), + }, + FunctionMetadata { + name: DecodeDifferent::Decoded("foo_transactional".to_string()), + arguments: DecodeDifferent::Decoded(vec![ + FunctionArgumentMetadata { + name: DecodeDifferent::Decoded("foo".to_string()), + ty: DecodeDifferent::Decoded("Compact".to_string()), + } + ]), + documentation: DecodeDifferent::Decoded(vec![ + " Doc comment put in metadata".to_string(), + ]), + }, + ])), + event: Some(DecodeDifferent::Decoded(vec![ + EventMetadata { + name: DecodeDifferent::Decoded("Proposed".to_string()), + arguments: DecodeDifferent::Decoded(vec!["::AccountId".to_string()]), + documentation: DecodeDifferent::Decoded(vec![ + " doc comment put in metadata".to_string() + ]), + }, + EventMetadata { + name: DecodeDifferent::Decoded("Spending".to_string()), + arguments: DecodeDifferent::Decoded(vec!["Balance".to_string()]), + documentation: DecodeDifferent::Decoded(vec![ + " doc".to_string() + ]), + }, + EventMetadata { + name: DecodeDifferent::Decoded("Something".to_string()), + arguments: DecodeDifferent::Decoded(vec!["Other".to_string()]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + EventMetadata { + name: DecodeDifferent::Decoded("SomethingElse".to_string()), + arguments: DecodeDifferent::Decoded(vec!["::_1".to_string()]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + ])), + constants: DecodeDifferent::Decoded(vec![ + ModuleConstantMetadata { + name: DecodeDifferent::Decoded("MyGetParam".to_string()), + ty: DecodeDifferent::Decoded("u32".to_string()), + value: DecodeDifferent::Decoded(vec![10, 0, 0, 0]), + documentation: DecodeDifferent::Decoded(vec![ + " Some comment".to_string(), + " Some comment".to_string(), + ]), + }, + ModuleConstantMetadata { + name: DecodeDifferent::Decoded("MyGetParam2".to_string()), + ty: DecodeDifferent::Decoded("u32".to_string()), + value: DecodeDifferent::Decoded(vec![11, 0, 0, 0]), + documentation: DecodeDifferent::Decoded(vec![ + " Some comment".to_string(), + " Some comment".to_string(), + ]), + }, + ModuleConstantMetadata { + name: DecodeDifferent::Decoded("MyGetParam3".to_string()), + ty: DecodeDifferent::Decoded("::_1".to_string()), + value: DecodeDifferent::Decoded(vec![12, 0, 0, 0, 0, 0, 0, 0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + ModuleConstantMetadata { + name: DecodeDifferent::Decoded("some_extra".to_string()), + ty: DecodeDifferent::Decoded("T::AccountId".to_string()), + value: DecodeDifferent::Decoded(vec![100, 0, 0, 0, 0, 0, 0, 0]), + documentation: DecodeDifferent::Decoded(vec![ + " Some doc".to_string(), + " Some doc".to_string(), + ]), + }, + ModuleConstantMetadata { + name: DecodeDifferent::Decoded("some_extra_extra".to_string()), + ty: DecodeDifferent::Decoded("T::AccountId".to_string()), + value: DecodeDifferent::Decoded(vec![0, 0, 0, 0, 0, 0, 0, 0]), + documentation: DecodeDifferent::Decoded(vec![ + " Some doc".to_string(), + ]), + }, + ]), + errors: DecodeDifferent::Decoded(vec![ + ErrorMetadata { + name: DecodeDifferent::Decoded("InsufficientProposersBalance".to_string()), + documentation: DecodeDifferent::Decoded(vec![ + " doc comment put into metadata".to_string(), + ]), + }, + ]), + }; + + let metadata = match Runtime::metadata().1 { + RuntimeMetadata::V12(metadata) => metadata, + _ => panic!("metadata has been bump, test needs to be updated"), + }; + + let modules_metadata = match metadata.modules { + DecodeDifferent::Encode(modules_metadata) => modules_metadata, + _ => unreachable!(), 
+ }; + + let pallet_metadata = ModuleMetadata::decode(&mut &modules_metadata[1].encode()[..]).unwrap(); + + pretty_assertions::assert_eq!(pallet_metadata, expected_pallet_metadata); +} diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs new file mode 100644 index 0000000000000..5b9001e0475fe --- /dev/null +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -0,0 +1,297 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub trait SomeAssociation { + type A: frame_support::dispatch::Parameter + Default; +} +impl SomeAssociation for u64 { + type A = u64; +} + +mod pallet_old { + use frame_support::{ + decl_storage, decl_error, decl_event, decl_module, weights::Weight, traits::Get, Parameter + }; + use frame_system::ensure_root; + use super::SomeAssociation; + + pub trait Config: frame_system::Config { + type SomeConst: Get; + type Balance: Parameter + codec::HasCompact + From + Into + Default + + SomeAssociation; + type Event: From> + Into<::Event>; + } + + decl_storage! { + trait Store for Module as Example { + /// Some documentation + Dummy get(fn dummy) config(): Option; + Bar get(fn bar) config(): map hasher(blake2_128_concat) T::AccountId => T::Balance; + Foo get(fn foo) config(): T::Balance = 3.into(); + Double get(fn double): double_map + hasher(blake2_128_concat) u32, + hasher(twox_64_concat) u64 + => ::A; + } + } + + decl_event!( + pub enum Event where Balance = ::Balance { + /// Dummy event, just here so there's a generic type that's used. + Dummy(Balance), + } + ); + + decl_module! { + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + fn deposit_event() = default; + const SomeConst: T::Balance = T::SomeConst::get(); + + #[weight = >::into(new_value.clone())] + fn set_dummy(origin, #[compact] new_value: T::Balance) { + ensure_root(origin)?; + + >::put(&new_value); + Self::deposit_event(RawEvent::Dummy(new_value)); + } + + fn on_initialize(_n: T::BlockNumber) -> Weight { + >::put(T::Balance::from(10)); + 10 + } + + fn on_finalize(_n: T::BlockNumber) { + >::put(T::Balance::from(11)); + } + } + } + + decl_error! 
{ + pub enum Error for Module { + /// Some wrong behavior + Wrong, + } + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::SomeAssociation; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use frame_system::ensure_root; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Balance: Parameter + codec::HasCompact + From + Into + Default + + MaybeSerializeDeserialize + SomeAssociation; + #[pallet::constant] + type SomeConst: Get; + type Event: From> + IsType<::Event>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks for Pallet { + fn on_initialize(_n: T::BlockNumber) -> Weight { + >::put(T::Balance::from(10)); + 10 + } + + fn on_finalize(_n: T::BlockNumber) { + >::put(T::Balance::from(11)); + } + } + + #[pallet::call] + impl Pallet { + #[pallet::weight(>::into(new_value.clone()))] + fn set_dummy( + origin: OriginFor, + #[pallet::compact] new_value: T::Balance + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + + >::put(&new_value); + Self::deposit_event(Event::Dummy(new_value)); + + Ok(().into()) + } + } + + #[pallet::error] + pub enum Error { + /// Some wrong behavior + Wrong, + } + + #[pallet::event] + #[pallet::generate_deposit(fn deposit_event)] + #[pallet::metadata(T::Balance = "Balance")] + pub enum Event { + /// Dummy event, just here so there's a generic type that's used. + Dummy(T::Balance), + } + + #[pallet::storage] + /// Some documentation + type Dummy = StorageValue<_, T::Balance, OptionQuery>; + + #[pallet::storage] + type Bar = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance, ValueQuery>; + + #[pallet::type_value] pub fn OnFooEmpty() -> T::Balance { 3.into() } + #[pallet::storage] + type Foo = StorageValue<_, T::Balance, ValueQuery, OnFooEmpty>; + + #[pallet::storage] + type Double = StorageDoubleMap< + _, Blake2_128Concat, u32, Twox64Concat, u64, ::A, ValueQuery + >; + + #[pallet::genesis_config] + pub struct GenesisConfig { + dummy: Option, + bar: Vec<(T::AccountId, T::Balance)>, + foo: T::Balance, + } + + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { + dummy: Default::default(), + bar: Default::default(), + foo: OnFooEmpty::::get(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + if let Some(dummy) = self.dummy.as_ref() { + >::put(dummy); + } + for (k, v) in &self.bar { + >::insert(k, v); + } + >::put(&self.foo); + } + } +} + +frame_support::parameter_types!( + pub const SomeConst: u64 = 10; + pub const BlockHashCount: u32 = 250; +); + +impl frame_system::Config for Runtime { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u32; + type Call = Call; + type Hash = sp_runtime::testing::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); +} +impl pallet::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} +impl pallet_old::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} + +pub type Header = sp_runtime::generic::Header; +pub 
type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system::{Module, Call, Event}, + // NOTE: name Example here is needed in order to have same module prefix + Example: pallet::{Module, Call, Event, Config, Storage}, + PalletOld: pallet_old::{Module, Call, Event, Config, Storage}, + } +); + +#[cfg(test)] +mod test { + use super::Runtime; + use super::pallet; + use super::pallet_old; + use codec::{Decode, Encode}; + + #[test] + fn metadata() { + let metadata = Runtime::metadata(); + let modules = match metadata.1 { + frame_metadata::RuntimeMetadata::V12(frame_metadata::RuntimeMetadataV12 { + modules: frame_metadata::DecodeDifferent::Encode(m), + .. + }) => m, + _ => unreachable!(), + }; + pretty_assertions::assert_eq!(modules[1].storage, modules[2].storage); + pretty_assertions::assert_eq!(modules[1].calls, modules[2].calls); + pretty_assertions::assert_eq!(modules[1].event, modules[2].event); + pretty_assertions::assert_eq!(modules[1].constants, modules[2].constants); + pretty_assertions::assert_eq!(modules[1].errors, modules[2].errors); + } + + #[test] + fn types() { + assert_eq!( + pallet_old::Event::::decode( + &mut &pallet::Event::::Dummy(10).encode()[..] + ).unwrap(), + pallet_old::Event::::Dummy(10), + ); + + assert_eq!( + pallet_old::Call::::decode( + &mut &pallet::Call::::set_dummy(10).encode()[..] + ).unwrap(), + pallet_old::Call::::set_dummy(10), + ); + } +} diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs new file mode 100644 index 0000000000000..d7de03ea46cfd --- /dev/null +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -0,0 +1,314 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod pallet_old { + use frame_support::{ + decl_storage, decl_error, decl_event, decl_module, weights::Weight, traits::Get, Parameter + }; + use frame_system::ensure_root; + + pub trait Config: frame_system::Config { + type SomeConst: Get; + type Balance: Parameter + codec::HasCompact + From + Into + Default; + type Event: From> + Into<::Event>; + } + + decl_storage! { + trait Store for Module, I: Instance = DefaultInstance> as Example { + /// Some documentation + Dummy get(fn dummy) config(): Option; + Bar get(fn bar) config(): map hasher(blake2_128_concat) T::AccountId => T::Balance; + Foo get(fn foo) config(): T::Balance = 3.into(); + Double get(fn double): + double_map hasher(blake2_128_concat) u32, hasher(twox_64_concat) u64 => u16; + } + } + + decl_event!( + pub enum Event where Balance = >::Balance { + /// Dummy event, just here so there's a generic type that's used. + Dummy(Balance), + } + ); + + decl_module! 
{ + pub struct Module, I: Instance = DefaultInstance> for enum Call + where origin: T::Origin + { + type Error = Error; + fn deposit_event() = default; + const SomeConst: T::Balance = T::SomeConst::get(); + + #[weight = >::into(new_value.clone())] + fn set_dummy(origin, #[compact] new_value: T::Balance) { + ensure_root(origin)?; + + >::put(&new_value); + Self::deposit_event(RawEvent::Dummy(new_value)); + } + + fn on_initialize(_n: T::BlockNumber) -> Weight { + >::put(T::Balance::from(10)); + 10 + } + + fn on_finalize(_n: T::BlockNumber) { + >::put(T::Balance::from(11)); + } + } + } + + decl_error! { + pub enum Error for Module, I: Instance> { + /// Some wrong behavior + Wrong, + } + } +} + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use frame_system::ensure_root; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Balance: Parameter + codec::HasCompact + From + Into + Default + + MaybeSerializeDeserialize; + #[pallet::constant] + type SomeConst: Get; + type Event: From> + IsType<::Event>; + } + + #[pallet::pallet] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks for Pallet { + fn on_initialize(_n: T::BlockNumber) -> Weight { + >::put(T::Balance::from(10)); + 10 + } + + fn on_finalize(_n: T::BlockNumber) { + >::put(T::Balance::from(11)); + } + } + + #[pallet::call] + impl, I: 'static> Pallet { + #[pallet::weight(>::into(new_value.clone()))] + fn set_dummy( + origin: OriginFor, + #[pallet::compact] new_value: T::Balance + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + + >::put(&new_value); + Self::deposit_event(Event::Dummy(new_value)); + + Ok(().into()) + } + } + + #[pallet::error] + pub enum Error { + /// Some wrong behavior + Wrong, + } + + #[pallet::event] + #[pallet::generate_deposit(fn deposit_event)] + #[pallet::metadata(T::Balance = "Balance")] + pub enum Event, I: 'static = ()> { + /// Dummy event, just here so there's a generic type that's used. 
+ Dummy(T::Balance), + } + + #[pallet::storage] + /// Some documentation + type Dummy, I: 'static = ()> = StorageValue<_, T::Balance, OptionQuery>; + + #[pallet::storage] + type Bar, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance, ValueQuery>; + + #[pallet::storage] + type Foo, I: 'static = ()> = + StorageValue<_, T::Balance, ValueQuery, OnFooEmpty>; + #[pallet::type_value] pub fn OnFooEmpty, I: 'static>() -> T::Balance { 3.into() } + + #[pallet::storage] + type Double = StorageDoubleMap< + _, Blake2_128Concat, u32, Twox64Concat, u64, u16, ValueQuery + >; + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + dummy: Option, + bar: Vec<(T::AccountId, T::Balance)>, + foo: T::Balance, + } + + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { + dummy: Default::default(), + bar: Default::default(), + foo: OnFooEmpty::::get(), + } + } + } + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + if let Some(dummy) = self.dummy.as_ref() { + >::put(dummy); + } + for (k, v) in &self.bar { + >::insert(k, v); + } + >::put(&self.foo); + } + } +} + +frame_support::parameter_types!( + pub const SomeConst: u64 = 10; + pub const BlockHashCount: u32 = 250; +); + +impl frame_system::Config for Runtime { + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u32; + type Call = Call; + type Hash = sp_runtime::testing::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); +} +impl pallet::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} +impl pallet::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} +impl pallet::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} +impl pallet_old::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} +impl pallet_old::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} +impl pallet_old::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} + +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system::{Module, Call, Event}, + Example: pallet::{Module, Call, Event, Config, Storage}, + PalletOld: pallet_old::{Module, Call, Event, Config, Storage}, + Instance2Example: pallet::::{Module, Call, Event, Config, Storage}, + PalletOld2: pallet_old::::{Module, Call, Event, Config, Storage}, + Instance3Example: pallet::::{Module, Call, Event, Config, Storage}, + PalletOld3: pallet_old::::{Module, Call, Event, Config, Storage}, + } +); + +#[cfg(test)] +mod test { + use super::Runtime; + use super::pallet; + use super::pallet_old; + use codec::{Decode, 
Encode}; + + #[test] + fn metadata() { + let metadata = Runtime::metadata(); + let modules = match metadata.1 { + frame_metadata::RuntimeMetadata::V12(frame_metadata::RuntimeMetadataV12 { + modules: frame_metadata::DecodeDifferent::Encode(m), + .. + }) => m, + _ => unreachable!(), + }; + for i in vec![1, 3, 5].into_iter() { + pretty_assertions::assert_eq!(modules[i].storage, modules[i+1].storage); + pretty_assertions::assert_eq!(modules[i].calls, modules[i+1].calls); + pretty_assertions::assert_eq!(modules[i].event, modules[i+1].event); + pretty_assertions::assert_eq!(modules[i].constants, modules[i+1].constants); + pretty_assertions::assert_eq!(modules[i].errors, modules[i+1].errors); + } + } + + #[test] + fn types() { + assert_eq!( + pallet_old::Event::::decode( + &mut &pallet::Event::::Dummy(10).encode()[..] + ).unwrap(), + pallet_old::Event::::Dummy(10), + ); + + assert_eq!( + pallet_old::Call::::decode( + &mut &pallet::Call::::set_dummy(10).encode()[..] + ).unwrap(), + pallet_old::Call::::set_dummy(10), + ); + } +} diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs new file mode 100644 index 0000000000000..62654d53e19d7 --- /dev/null +++ b/frame/support/test/tests/pallet_instance.rs @@ -0,0 +1,709 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
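// The `storage_expand` tests in this file hand-compute final storage keys. A minimal
// sketch of the layout they assume, using the same `sp_io::hashing` helpers imported
// below (the function name `map_final_key` is illustrative only):
//
// fn map_final_key(pallet_prefix: &[u8], item_name: &[u8], encoded_key: &[u8]) -> Vec<u8> {
//     // twox128(pallet prefix) ++ twox128(storage item name) ++ hasher(encoded key);
//     // shown here with Blake2_128Concat, i.e. blake2_128(key) followed by the key itself.
//     let mut k = sp_io::hashing::twox_128(pallet_prefix).to_vec();
//     k.extend_from_slice(&sp_io::hashing::twox_128(item_name));
//     k.extend_from_slice(&sp_io::hashing::blake2_128(encoded_key));
//     k.extend_from_slice(encoded_key);
//     k
// }
//
// For the default instance the pallet prefix is the `construct_runtime!` name ("Example");
// for `Instance1` it is "Instance1Example", which is what the tests check against
// `final_prefix()`.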
+ +use frame_support::{ + weights::{DispatchInfo, DispatchClass, Pays, GetDispatchInfo}, + traits::{ + GetCallName, GetPalletVersion, OnInitialize, OnFinalize, OnRuntimeUpgrade, OnGenesis, + }, + dispatch::UnfilteredDispatchable, + storage::unhashed, +}; +use sp_runtime::DispatchError; +use sp_io::{TestExternalities, hashing::{twox_64, twox_128, blake2_128}}; + +#[frame_support::pallet] +pub mod pallet { + use sp_std::any::TypeId; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + type BalanceOf = >::Balance; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + type MyGetParam: Get; + type Balance: Parameter + Default; + type Event: From> + IsType<::Event>; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(crate) trait Store)] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + fn on_initialize(_: BlockNumberFor) -> Weight { + if TypeId::of::() == TypeId::of::<()>() { + Self::deposit_event(Event::Something(10)); + 10 + } else { + Self::deposit_event(Event::Something(11)); + 11 + } + } + fn on_finalize(_: BlockNumberFor) { + if TypeId::of::() == TypeId::of::<()>() { + Self::deposit_event(Event::Something(20)); + } else { + Self::deposit_event(Event::Something(21)); + } + } + fn on_runtime_upgrade() -> Weight { + if TypeId::of::() == TypeId::of::<()>() { + Self::deposit_event(Event::Something(30)); + 30 + } else { + Self::deposit_event(Event::Something(31)); + 31 + } + } + fn integrity_test() { + } + } + + #[pallet::call] + impl, I: 'static> Pallet { + /// Doc comment put in metadata + #[pallet::weight(Weight::from(*_foo))] + fn foo(origin: OriginFor, #[pallet::compact] _foo: u32) -> DispatchResultWithPostInfo { + let _ = origin; + Self::deposit_event(Event::Something(3)); + Ok(().into()) + } + + /// Doc comment put in metadata + #[pallet::weight(1)] + #[frame_support::transactional] + fn foo_transactional( + origin: OriginFor, + #[pallet::compact] _foo: u32 + ) -> DispatchResultWithPostInfo { + let _ = origin; + Ok(().into()) + } + } + + + #[pallet::error] + pub enum Error { + /// doc comment put into metadata + InsufficientProposersBalance, + } + + #[pallet::event] + #[pallet::metadata(BalanceOf = "Balance", u32 = "Other")] + #[pallet::generate_deposit(fn deposit_event)] + pub enum Event, I: 'static = ()> { + /// doc comment put in metadata + Proposed(::AccountId), + /// doc + Spending(BalanceOf), + Something(u32), + } + + #[pallet::storage] + pub type Value = StorageValue<_, u32>; + + #[pallet::storage] + pub type Map = StorageMap<_, Blake2_128Concat, u8, u16>; + + #[pallet::storage] + pub type Map2 = StorageMap<_, Twox64Concat, u16, u32>; + + #[pallet::storage] + pub type DoubleMap = + StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; + + #[pallet::storage] + pub type DoubleMap2 = + StorageDoubleMap<_, Twox64Concat, u16, Blake2_128Concat, u32, u64>; + + #[pallet::genesis_config] + #[derive(Default)] + pub struct GenesisConfig { + _myfield: u32, + } + + #[pallet::genesis_build] + impl, I:'static> GenesisBuild for GenesisConfig { + fn build(&self) {} + } + + #[pallet::origin] + #[derive(EqNoBound, RuntimeDebugNoBound, CloneNoBound, PartialEqNoBound, Encode, Decode)] + pub struct Origin(PhantomData<(T, I)>); + + #[pallet::validate_unsigned] + impl, I: 'static> ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned( + _source: TransactionSource, + _call: &Self::Call + ) -> TransactionValidity { + 
Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } + } + + #[pallet::inherent] + impl, I: 'static> ProvideInherent for Pallet { + type Call = Call; + type Error = InherentError; + + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_data: &InherentData) -> Option { + unimplemented!(); + } + } + + #[derive(codec::Encode, sp_runtime::RuntimeDebug)] + #[cfg_attr(feature = "std", derive(codec::Decode))] + pub enum InherentError { + } + + impl sp_inherents::IsFatalError for InherentError { + fn is_fatal_error(&self) -> bool { + unimplemented!(); + } + } + + pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; +} + +// Test that a instantiable pallet with a generic genesis_config is correctly handled +#[frame_support::pallet] +pub mod pallet2 { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Event: From> + IsType<::Event>; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(crate) trait Store)] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet {} + + #[pallet::call] + impl, I: 'static> Pallet {} + + #[pallet::event] + pub enum Event, I: 'static = ()> { + /// Something + Something(u32), + } + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + phantom: PhantomData<(T, I)>, + } + + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { + phantom: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) {} + } +} + +frame_support::parameter_types!( + pub const MyGetParam: u32= 10; + pub const BlockHashCount: u32 = 250; +); + +impl frame_system::Config for Runtime { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u32; + type Call = Call; + type Hash = sp_runtime::testing::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); +} +impl pallet::Config for Runtime { + type Event = Event; + type MyGetParam= MyGetParam; + type Balance = u64; +} +impl pallet::Config for Runtime { + type Event = Event; + type MyGetParam= MyGetParam; + type Balance = u64; +} +impl pallet2::Config for Runtime { + type Event = Event; +} +impl pallet2::Config for Runtime { + type Event = Event; +} + +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system::{Module, Call, Event}, + Example: pallet::{Module, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned}, + Instance1Example: pallet::::{ + Module, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned + }, + Example2: pallet2::{Module, Call, Event, Config, Storage}, + Instance1Example2: pallet2::::{Module, Call, Event, Config, Storage}, + } +); + 
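// The runtime above registers `pallet` and `pallet2` twice each: once for the default
// instance and once for `Instance1`. The tests below rely on each copy getting its own
// module index, storage prefix and event variant:
//   default instance -> index 1, prefix "Example",          wrapped as `Event::pallet(..)`
//   Instance1 copy   -> index 2, prefix "Instance1Example", wrapped as `Event::pallet_Instance1(..)`
//
// A minimal sketch of how that surfaces in dispatch errors, mirroring `error_expand`
// below (`pallet::Instance1` is assumed to be the instance marker type used by these
// generics; the exact path is elided in this rendering of the patch):
//
// let err: sp_runtime::DispatchError =
//     pallet::Error::<Runtime, pallet::Instance1>::InsufficientProposersBalance.into();
// assert!(matches!(err, sp_runtime::DispatchError::Module { index: 2, .. }));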
+#[test] +fn call_expand() { + let call_foo = pallet::Call::::foo(3); + assert_eq!( + call_foo.get_dispatch_info(), + DispatchInfo { + weight: 3, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + } + ); + assert_eq!(call_foo.get_call_name(), "foo"); + assert_eq!( + pallet::Call::::get_call_names(), + &["foo", "foo_transactional"], + ); + + let call_foo = pallet::Call::::foo(3); + assert_eq!( + call_foo.get_dispatch_info(), + DispatchInfo { + weight: 3, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + } + ); + assert_eq!(call_foo.get_call_name(), "foo"); + assert_eq!( + pallet::Call::::get_call_names(), + &["foo", "foo_transactional"], + ); +} + +#[test] +fn error_expand() { + assert_eq!( + format!("{:?}", pallet::Error::::InsufficientProposersBalance), + String::from("InsufficientProposersBalance"), + ); + assert_eq!( + <&'static str>::from(pallet::Error::::InsufficientProposersBalance), + "InsufficientProposersBalance", + ); + assert_eq!( + DispatchError::from(pallet::Error::::InsufficientProposersBalance), + DispatchError::Module { + index: 1, + error: 0, + message: Some("InsufficientProposersBalance"), + }, + ); + + assert_eq!( + format!("{:?}", pallet::Error::::InsufficientProposersBalance), + String::from("InsufficientProposersBalance"), + ); + assert_eq!( + <&'static str>::from(pallet::Error::::InsufficientProposersBalance), + "InsufficientProposersBalance", + ); + assert_eq!( + DispatchError::from(pallet::Error::::InsufficientProposersBalance), + DispatchError::Module { + index: 2, + error: 0, + message: Some("InsufficientProposersBalance"), + }, + ); +} + +#[test] +fn instance_expand() { + // assert same type + let _: pallet::__InherentHiddenInstance = (); +} + +#[test] +fn pallet_expand_deposit_event() { + TestExternalities::default().execute_with(|| { + frame_system::Module::::set_block_number(1); + pallet::Call::::foo(3).dispatch_bypass_filter(None.into()).unwrap(); + assert_eq!( + frame_system::Module::::events()[0].event, + Event::pallet(pallet::Event::Something(3)), + ); + }); + + TestExternalities::default().execute_with(|| { + frame_system::Module::::set_block_number(1); + pallet::Call::::foo(3).dispatch_bypass_filter(None.into()).unwrap(); + assert_eq!( + frame_system::Module::::events()[0].event, + Event::pallet_Instance1(pallet::Event::Something(3)), + ); + }); +} + +#[test] +fn storage_expand() { + use frame_support::pallet_prelude::*; + use frame_support::StoragePrefixedMap; + + fn twox_64_concat(d: &[u8]) -> Vec { + let mut v = twox_64(d).to_vec(); + v.extend_from_slice(d); + v + } + + fn blake2_128_concat(d: &[u8]) -> Vec { + let mut v = blake2_128(d).to_vec(); + v.extend_from_slice(d); + v + } + + TestExternalities::default().execute_with(|| { + >::put(1); + let k = [twox_128(b"Example"), twox_128(b"Value")].concat(); + assert_eq!(unhashed::get::(&k), Some(1u32)); + + >::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u16)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + 
assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u64)); + assert_eq!(&k[..32], &>::final_prefix()); + }); + + TestExternalities::default().execute_with(|| { + >::put(1); + let k = [twox_128(b"Instance1Example"), twox_128(b"Value")].concat(); + assert_eq!(unhashed::get::(&k), Some(1u32)); + + >::insert(1, 2); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"Map")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u16)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(1, 2); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"Map2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"DoubleMap")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"DoubleMap2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u64)); + assert_eq!(&k[..32], &>::final_prefix()); + }); +} + +#[test] +fn pallet_hooks_expand() { + TestExternalities::default().execute_with(|| { + frame_system::Module::::set_block_number(1); + + assert_eq!(AllModules::on_initialize(1), 21); + AllModules::on_finalize(1); + + assert_eq!(pallet::Pallet::::storage_version(), None); + assert_eq!(pallet::Pallet::::storage_version(), None); + assert_eq!(AllModules::on_runtime_upgrade(), 61); + assert_eq!( + pallet::Pallet::::storage_version(), + Some(pallet::Pallet::::current_version()), + ); + assert_eq!( + pallet::Pallet::::storage_version(), + Some(pallet::Pallet::::current_version()), + ); + + // The order is indeed reversed due to https://github.com/paritytech/substrate/issues/6280 + assert_eq!( + frame_system::Module::::events()[0].event, + Event::pallet_Instance1(pallet::Event::Something(11)), + ); + assert_eq!( + frame_system::Module::::events()[1].event, + Event::pallet(pallet::Event::Something(10)), + ); + assert_eq!( + frame_system::Module::::events()[2].event, + Event::pallet_Instance1(pallet::Event::Something(21)), + ); + assert_eq!( + frame_system::Module::::events()[3].event, + Event::pallet(pallet::Event::Something(20)), + ); + assert_eq!( + frame_system::Module::::events()[4].event, + Event::pallet_Instance1(pallet::Event::Something(31)), + ); + assert_eq!( + frame_system::Module::::events()[5].event, + Event::pallet(pallet::Event::Something(30)), + ); + }) +} + +#[test] +fn pallet_on_genesis() { + TestExternalities::default().execute_with(|| { + assert_eq!(pallet::Pallet::::storage_version(), None); + pallet::Pallet::::on_genesis(); + assert_eq!( + pallet::Pallet::::storage_version(), + Some(pallet::Pallet::::current_version()), + ); + + assert_eq!(pallet::Pallet::::storage_version(), None); + pallet::Pallet::::on_genesis(); + assert_eq!( + pallet::Pallet::::storage_version(), + Some(pallet::Pallet::::current_version()), + ); + }) +} + +#[test] +fn metadata() { + use frame_metadata::*; + use codec::{Decode, Encode}; + + 
let expected_pallet_metadata = ModuleMetadata { + index: 1, + name: DecodeDifferent::Decoded("Example".to_string()), + storage: Some(DecodeDifferent::Decoded(StorageMetadata { + prefix: DecodeDifferent::Decoded("Example".to_string()), + entries: DecodeDifferent::Decoded(vec![ + StorageEntryMetadata { + name: DecodeDifferent::Decoded("Value".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Decoded("u32".to_string())), + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Decoded("Map".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: DecodeDifferent::Decoded("u8".to_string()), + value: DecodeDifferent::Decoded("u16".to_string()), + hasher: StorageHasher::Blake2_128Concat, + unused: false, + }, + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Decoded("Map2".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: DecodeDifferent::Decoded("u16".to_string()), + value: DecodeDifferent::Decoded("u32".to_string()), + hasher: StorageHasher::Twox64Concat, + unused: false, + }, + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Decoded("DoubleMap".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::DoubleMap { + value: DecodeDifferent::Decoded("u32".to_string()), + key1: DecodeDifferent::Decoded("u8".to_string()), + key2: DecodeDifferent::Decoded("u16".to_string()), + hasher: StorageHasher::Blake2_128Concat, + key2_hasher: StorageHasher::Twox64Concat, + }, + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Decoded("DoubleMap2".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::DoubleMap { + value: DecodeDifferent::Decoded("u64".to_string()), + key1: DecodeDifferent::Decoded("u16".to_string()), + key2: DecodeDifferent::Decoded("u32".to_string()), + hasher: StorageHasher::Twox64Concat, + key2_hasher: StorageHasher::Blake2_128Concat, + }, + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + ]), + })), + calls: Some(DecodeDifferent::Decoded(vec![ + FunctionMetadata { + name: DecodeDifferent::Decoded("foo".to_string()), + arguments: DecodeDifferent::Decoded(vec![ + FunctionArgumentMetadata { + name: DecodeDifferent::Decoded("_foo".to_string()), + ty: DecodeDifferent::Decoded("Compact".to_string()), + } + ]), + documentation: DecodeDifferent::Decoded(vec![ + " Doc comment put in metadata".to_string(), + ]), + }, + FunctionMetadata { + name: DecodeDifferent::Decoded("foo_transactional".to_string()), + arguments: DecodeDifferent::Decoded(vec![ + FunctionArgumentMetadata { + name: DecodeDifferent::Decoded("_foo".to_string()), + ty: DecodeDifferent::Decoded("Compact".to_string()), + } + ]), + documentation: DecodeDifferent::Decoded(vec![ + " Doc comment put in metadata".to_string(), + ]), + }, + ])), + event: Some(DecodeDifferent::Decoded(vec![ + EventMetadata { + name: DecodeDifferent::Decoded("Proposed".to_string()), + arguments: DecodeDifferent::Decoded(vec!["::AccountId".to_string()]), + documentation: DecodeDifferent::Decoded(vec![ + " doc comment put in 
metadata".to_string() + ]), + }, + EventMetadata { + name: DecodeDifferent::Decoded("Spending".to_string()), + arguments: DecodeDifferent::Decoded(vec!["Balance".to_string()]), + documentation: DecodeDifferent::Decoded(vec![ + " doc".to_string() + ]), + }, + EventMetadata { + name: DecodeDifferent::Decoded("Something".to_string()), + arguments: DecodeDifferent::Decoded(vec!["Other".to_string()]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + ])), + constants: DecodeDifferent::Decoded(vec![ + ModuleConstantMetadata { + name: DecodeDifferent::Decoded("MyGetParam".to_string()), + ty: DecodeDifferent::Decoded("u32".to_string()), + value: DecodeDifferent::Decoded(vec![10, 0, 0, 0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, + ]), + errors: DecodeDifferent::Decoded(vec![ + ErrorMetadata { + name: DecodeDifferent::Decoded("InsufficientProposersBalance".to_string()), + documentation: DecodeDifferent::Decoded(vec![ + " doc comment put into metadata".to_string(), + ]), + }, + ]), + }; + + let mut expected_pallet_instance1_metadata = expected_pallet_metadata.clone(); + expected_pallet_instance1_metadata.name = DecodeDifferent::Decoded("Instance1Example".to_string()); + expected_pallet_instance1_metadata.index = 2; + match expected_pallet_instance1_metadata.storage { + Some(DecodeDifferent::Decoded(ref mut storage_meta)) => { + storage_meta.prefix = DecodeDifferent::Decoded("Instance1Example".to_string()); + }, + _ => unreachable!(), + } + + + let metadata = match Runtime::metadata().1 { + RuntimeMetadata::V12(metadata) => metadata, + _ => panic!("metadata has been bump, test needs to be updated"), + }; + + let modules_metadata = match metadata.modules { + DecodeDifferent::Encode(modules_metadata) => modules_metadata, + _ => unreachable!(), + }; + + let pallet_metadata = ModuleMetadata::decode(&mut &modules_metadata[1].encode()[..]).unwrap(); + let pallet_instance1_metadata = + ModuleMetadata::decode(&mut &modules_metadata[2].encode()[..]).unwrap(); + + pretty_assertions::assert_eq!(pallet_metadata, expected_pallet_metadata); + pretty_assertions::assert_eq!(pallet_instance1_metadata, expected_pallet_instance1_metadata); +} diff --git a/bin/node/runtime/src/weights/mod.rs b/frame/support/test/tests/pallet_ui.rs similarity index 62% rename from bin/node/runtime/src/weights/mod.rs rename to frame/support/test/tests/pallet_ui.rs index 5de6286da9b71..1836b06cabfdd 100644 --- a/bin/node/runtime/src/weights/mod.rs +++ b/frame/support/test/tests/pallet_ui.rs @@ -1,4 +1,6 @@ -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,6 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! A list of the different weight modules for our runtime. +#[rustversion::attr(not(stable), ignore)] +#[test] +fn pallet_ui() { + // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
+ std::env::set_var("SKIP_WASM_BUILD", "1"); -pub mod pallet_contracts; + let t = trybuild::TestCases::new(); + t.compile_fail("tests/pallet_ui/*.rs"); +} diff --git a/frame/support/test/tests/pallet_ui/attr_non_empty.rs b/frame/support/test/tests/pallet_ui/attr_non_empty.rs new file mode 100644 index 0000000000000..5173d983bbd8e --- /dev/null +++ b/frame/support/test/tests/pallet_ui/attr_non_empty.rs @@ -0,0 +1,6 @@ +#[frame_support::pallet [foo]] +mod foo { +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/attr_non_empty.stderr b/frame/support/test/tests/pallet_ui/attr_non_empty.stderr new file mode 100644 index 0000000000000..144af5a17ea5c --- /dev/null +++ b/frame/support/test/tests/pallet_ui/attr_non_empty.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet macro call: expected no attributes, e.g. macro call must be just `#[frame_support::pallet]` or `#[pallet]` + --> $DIR/attr_non_empty.rs:1:26 + | +1 | #[frame_support::pallet [foo]] + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.rs b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.rs new file mode 100644 index 0000000000000..69d35344d5761 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar: codec::Codec; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + Ok(().into()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr new file mode 100644 index 0000000000000..1eaf71be17104 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -0,0 +1,28 @@ +error[E0369]: binary operation `==` cannot be applied to type `&::Bar` + --> $DIR/call_argument_invalid_bound.rs:20:37 + | +20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ + | +help: consider further restricting this bound + | +17 | #[pallet::call + std::cmp::PartialEq] + | ^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `::Bar: Clone` is not satisfied + --> $DIR/call_argument_invalid_bound.rs:20:37 + | +20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ the trait `Clone` is not implemented for `::Bar` + | + = note: required by `clone` + +error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` + --> $DIR/call_argument_invalid_bound.rs:20:37 + | +20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | + = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.rs b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.rs new file mode 100644 index 0000000000000..581c72a4240a0 --- /dev/null +++ 
b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + Ok(().into()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr new file mode 100644 index 0000000000000..86968221cf30c --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -0,0 +1,50 @@ +error[E0277]: the trait bound `pallet::Call: Decode` is not satisfied + --> $DIR/call_argument_invalid_bound_2.rs:17:12 + | +17 | #[pallet::call] + | ^^^^ the trait `Decode` is not implemented for `pallet::Call` + | + ::: $WORKSPACE/frame/support/src/dispatch.rs + | + | type Call: UnfilteredDispatchable + Codec + Clone + PartialEq + Eq; + | ----- required by this bound in `frame_support::Callable::Call` + +error[E0277]: the trait bound `pallet::Call: pallet::_::_parity_scale_codec::Encode` is not satisfied + --> $DIR/call_argument_invalid_bound_2.rs:17:12 + | +17 | #[pallet::call] + | ^^^^ the trait `pallet::_::_parity_scale_codec::Encode` is not implemented for `pallet::Call` + | + ::: $WORKSPACE/frame/support/src/dispatch.rs + | + | type Call: UnfilteredDispatchable + Codec + Clone + PartialEq + Eq; + | ----- required by this bound in `frame_support::Callable::Call` + +error[E0369]: binary operation `==` cannot be applied to type `&::Bar` + --> $DIR/call_argument_invalid_bound_2.rs:20:37 + | +20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ + | +help: consider further restricting this bound + | +17 | #[pallet::call + std::cmp::PartialEq] + | ^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `::Bar: Clone` is not satisfied + --> $DIR/call_argument_invalid_bound_2.rs:20:37 + | +20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ the trait `Clone` is not implemented for `::Bar` + | + = note: required by `clone` + +error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` + --> $DIR/call_argument_invalid_bound_2.rs:20:37 + | +20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | + = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.rs b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.rs new file mode 100644 index 0000000000000..97f362551037d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.rs @@ -0,0 +1,29 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + use codec::{Encode, Decode}; + + #[pallet::config] + pub trait 
Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[derive(Encode, Decode)] + struct Bar; + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + Ok(().into()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr new file mode 100644 index 0000000000000..89cee573a2757 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -0,0 +1,26 @@ +error[E0369]: binary operation `==` cannot be applied to type `&Bar` + --> $DIR/call_argument_invalid_bound_3.rs:22:37 + | +22 | fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + | ^^^ + | + = note: an implementation of `std::cmp::PartialEq` might be missing for `&Bar` + +error[E0277]: the trait bound `Bar: Clone` is not satisfied + --> $DIR/call_argument_invalid_bound_3.rs:22:37 + | +22 | fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `Clone` is not implemented for `Bar` + | + = note: required by `clone` + +error[E0277]: `Bar` doesn't implement `std::fmt::Debug` + --> $DIR/call_argument_invalid_bound_3.rs:22:37 + | +22 | fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + | ^^^ `Bar` cannot be formatted using `{:?}` + | + = help: the trait `std::fmt::Debug` is not implemented for `Bar` + = note: add `#[derive(Debug)]` or manually implement `std::fmt::Debug` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/call_invalid_const.rs b/frame/support/test/tests/pallet_ui/call_invalid_const.rs new file mode 100644 index 0000000000000..1a28bc32e65c6 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_const.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + const Foo: u8 = 3u8; + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_invalid_const.stderr b/frame/support/test/tests/pallet_ui/call_invalid_const.stderr new file mode 100644 index 0000000000000..0acb3e864a512 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_const.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, only method accepted + --> $DIR/call_invalid_const.rs:17:3 + | +17 | const Foo: u8 = 3u8; + | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/call_invalid_origin_type.rs b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.rs new file mode 100644 index 0000000000000..edf953b5976c0 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl 
Pallet { + fn foo(origin: u8) {} + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr new file mode 100644 index 0000000000000..855c59fd8d57d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr @@ -0,0 +1,11 @@ +error: Invalid type: expected `OriginFor` + --> $DIR/call_invalid_origin_type.rs:17:18 + | +17 | fn foo(origin: u8) {} + | ^^ + +error: expected `OriginFor` + --> $DIR/call_invalid_origin_type.rs:17:18 + | +17 | fn foo(origin: u8) {} + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_missing_weight.rs b/frame/support/test/tests/pallet_ui/call_missing_weight.rs new file mode 100644 index 0000000000000..2ce607c53ac3a --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_missing_weight.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + fn foo(origin: OriginFor) -> DispatchResultWithPostInfo {} + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_missing_weight.stderr b/frame/support/test/tests/pallet_ui/call_missing_weight.stderr new file mode 100644 index 0000000000000..37386d7771a7a --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_missing_weight.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, requires weight attribute i.e. `#[pallet::weight($expr)]` + --> $DIR/call_missing_weight.rs:17:3 + | +17 | fn foo(origin: OriginFor) -> DispatchResultWithPostInfo {} + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_no_origin.rs b/frame/support/test/tests/pallet_ui/call_no_origin.rs new file mode 100644 index 0000000000000..83d10b6b08b4f --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_no_origin.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + fn foo() {} + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_no_origin.stderr b/frame/support/test/tests/pallet_ui/call_no_origin.stderr new file mode 100644 index 0000000000000..42afd02c42639 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_no_origin.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, must have at least origin arg + --> $DIR/call_no_origin.rs:17:3 + | +17 | fn foo() {} + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_no_return.rs b/frame/support/test/tests/pallet_ui/call_no_return.rs new file mode 100644 index 0000000000000..a18c30f6d6d90 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_no_return.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + 
#[pallet::call] + impl Pallet { + fn foo(origin: OriginFor) {} + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_no_return.stderr b/frame/support/test/tests/pallet_ui/call_no_return.stderr new file mode 100644 index 0000000000000..b16d401355c12 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_no_return.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, require return type DispatchResultWithPostInfo + --> $DIR/call_no_return.rs:17:3 + | +17 | fn foo(origin: OriginFor) {} + | ^^ diff --git a/frame/support/test/tests/pallet_ui/duplicate_call_attr.rs b/frame/support/test/tests/pallet_ui/duplicate_call_attr.rs new file mode 100644 index 0000000000000..b8a32a0bd9f69 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_call_attr.rs @@ -0,0 +1,28 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + use frame_support::pallet_prelude::StorageValue; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + #[pallet::generate_store(trait Store)] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + type Foo = StorageValue<_, u8>; + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/duplicate_call_attr.stderr b/frame/support/test/tests/pallet_ui/duplicate_call_attr.stderr new file mode 100644 index 0000000000000..c2956717bb2bb --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_call_attr.stderr @@ -0,0 +1,5 @@ +error: Invalid duplicated attribute + --> $DIR/duplicate_call_attr.rs:23:12 + | +23 | #[pallet::call] + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs b/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs new file mode 100644 index 0000000000000..d675ddefe985b --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs @@ -0,0 +1,26 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + use frame_support::pallet_prelude::StorageValue; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + #[pallet::generate_store(trait Store)] + #[pallet::generate_store(trait Store)] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + type Foo = StorageValue<_, u8>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr b/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr new file mode 100644 index 0000000000000..eed6ad4494edc --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::pallet, multiple argument pallet::generate_store found + --> $DIR/duplicate_store_attr.rs:12:33 + | +12 | #[pallet::generate_store(trait Store)] + | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/error_no_fieldless.rs b/frame/support/test/tests/pallet_ui/error_no_fieldless.rs new file mode 100644 index 0000000000000..c9d444d6f90dd --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_no_fieldless.rs @@ -0,0 +1,25 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait 
Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::error] + pub enum Error { + U8(u8), + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/error_no_fieldless.stderr b/frame/support/test/tests/pallet_ui/error_no_fieldless.stderr new file mode 100644 index 0000000000000..1d69fbeff9aac --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_no_fieldless.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::error, unexpected fields, must be `Unit` + --> $DIR/error_no_fieldless.rs:20:5 + | +20 | U8(u8), + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/error_where_clause.rs b/frame/support/test/tests/pallet_ui/error_where_clause.rs new file mode 100644 index 0000000000000..29d7435bc4bc8 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_where_clause.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::error] + pub enum Error where u32: From {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/error_where_clause.stderr b/frame/support/test/tests/pallet_ui/error_where_clause.stderr new file mode 100644 index 0000000000000..8e9d0e60692d8 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_where_clause.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::error, where clause is not allowed on pallet error item + --> $DIR/error_where_clause.rs:19:20 + | +19 | pub enum Error where u32: From {} + | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/error_wrong_item.rs b/frame/support/test/tests/pallet_ui/error_wrong_item.rs new file mode 100644 index 0000000000000..50e66dc8c0dce --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_wrong_item.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::error] + pub struct Foo; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/error_wrong_item.stderr b/frame/support/test/tests/pallet_ui/error_wrong_item.stderr new file mode 100644 index 0000000000000..8c0496782fb16 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_wrong_item.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::error, expected item enum + --> $DIR/error_wrong_item.rs:19:2 + | +19 | pub struct Foo; + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/error_wrong_item_name.rs b/frame/support/test/tests/pallet_ui/error_wrong_item_name.rs new file mode 100644 index 0000000000000..14107fafb06ea --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_wrong_item_name.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for 
Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::error] + pub enum Foo {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/error_wrong_item_name.stderr b/frame/support/test/tests/pallet_ui/error_wrong_item_name.stderr new file mode 100644 index 0000000000000..d7e54ad8a7516 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_wrong_item_name.stderr @@ -0,0 +1,5 @@ +error: expected `Error` + --> $DIR/error_wrong_item_name.rs:19:11 + | +19 | pub enum Foo {} + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.rs b/frame/support/test/tests/pallet_ui/event_field_not_member.rs new file mode 100644 index 0000000000000..0ecde4c130878 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.rs @@ -0,0 +1,28 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, IsType}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar; + type Event: IsType<::Event> + From>; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Event { + B { b: T::Bar }, + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr new file mode 100644 index 0000000000000..97d4db798e611 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -0,0 +1,28 @@ +error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` + --> $DIR/event_field_not_member.rs:23:7 + | +23 | B { b: T::Bar }, + | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | + = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` + +error[E0369]: binary operation `==` cannot be applied to type `&::Bar` + --> $DIR/event_field_not_member.rs:23:7 + | +23 | B { b: T::Bar }, + | ^ + | +help: consider further restricting this bound + | +22 | pub enum Event { + | ^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `::Bar: Clone` is not satisfied + --> $DIR/event_field_not_member.rs:23:7 + | +23 | B { b: T::Bar }, + | ^ the trait `Clone` is not implemented for `::Bar` + | + = note: required by `clone` diff --git a/frame/support/test/tests/pallet_ui/event_not_in_trait.rs b/frame/support/test/tests/pallet_ui/event_not_in_trait.rs new file mode 100644 index 0000000000000..94151ba4c3d9d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_not_in_trait.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Event { + B { b: T::Bar }, + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr new file mode 100644 index 0000000000000..dd96c700ce7e5 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr 
@@ -0,0 +1,7 @@ +error: Invalid usage of Event, `Config` contains no associated type `Event`, but enum `Event` is declared (in use of `#[pallet::event]`). An Event associated type must be declare on trait `Config`. + --> $DIR/event_not_in_trait.rs:1:1 + | +1 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs new file mode 100644 index 0000000000000..fa3bf04d3530d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs @@ -0,0 +1,28 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar; + type Event; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Event { + B { b: T::Bar }, + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr new file mode 100644 index 0000000000000..1f58a37576d0d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr @@ -0,0 +1,5 @@ +error: Invalid `type Event`, associated type `Event` is reserved and must bound: `IsType<::Event>` + --> $DIR/event_type_invalid_bound.rs:9:3 + | +9 | type Event; + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs new file mode 100644 index 0000000000000..564a539b89f57 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs @@ -0,0 +1,28 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, IsType}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar; + type Event: IsType<::Event>; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Event { + B { b: T::Bar }, + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr new file mode 100644 index 0000000000000..8b8946f3b25eb --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr @@ -0,0 +1,5 @@ +error: Invalid `type Event`, associated type `Event` is reserved and must bound: `From` or `From>` or `From>` + --> $DIR/event_type_invalid_bound_2.rs:9:3 + | +9 | type Event: IsType<::Event>; + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/event_wrong_item.rs b/frame/support/test/tests/pallet_ui/event_wrong_item.rs new file mode 100644 index 0000000000000..d6690557c39d8 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_wrong_item.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + 
#[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub struct Foo; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_wrong_item.stderr b/frame/support/test/tests/pallet_ui/event_wrong_item.stderr new file mode 100644 index 0000000000000..21eb0ed35e936 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_wrong_item.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::event, expected item enum + --> $DIR/event_wrong_item.rs:19:2 + | +19 | pub struct Foo; + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/event_wrong_item_name.rs b/frame/support/test/tests/pallet_ui/event_wrong_item_name.rs new file mode 100644 index 0000000000000..d828965c5173c --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_wrong_item_name.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Foo {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_wrong_item_name.stderr b/frame/support/test/tests/pallet_ui/event_wrong_item_name.stderr new file mode 100644 index 0000000000000..14e8615c56199 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_wrong_item_name.stderr @@ -0,0 +1,5 @@ +error: expected `Event` + --> $DIR/event_wrong_item_name.rs:19:11 + | +19 | pub enum Foo {} + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.rs b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.rs new file mode 100644 index 0000000000000..da5e8d0c4da52 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.rs @@ -0,0 +1,26 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, GenesisBuild}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::genesis_config] + pub struct GenesisConfig; + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr new file mode 100644 index 0000000000000..a2998788736ac --- /dev/null +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -0,0 +1,10 @@ +error[E0277]: the trait bound `pallet::GenesisConfig: std::default::Default` is not satisfied + --> $DIR/genesis_default_not_satisfied.rs:22:18 + | +22 | impl GenesisBuild for GenesisConfig {} + | ^^^^^^^^^^^^^^^ the trait `std::default::Default` is not implemented for `pallet::GenesisConfig` + | + ::: $WORKSPACE/frame/support/src/traits.rs + | + | pub trait GenesisBuild: Default + MaybeSerializeDeserialize { + | ------- required by this bound in `GenesisBuild` diff --git a/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.rs b/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.rs new file mode 100644 index 0000000000000..9ae851005acb3 --- /dev/null +++ 
b/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.stderr b/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.stderr new file mode 100644 index 0000000000000..9afc1037a48ae --- /dev/null +++ b/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.stderr @@ -0,0 +1,5 @@ +error: `#[pallet::genesis_config]` and `#[pallet::genesis_build]` attributes must be either both used or both not used, instead genesis_config is unused and genesis_build is used + --> $DIR/genesis_inconsistent_build_config.rs:2:1 + | +2 | mod pallet { + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/genesis_invalid_generic.rs b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.rs new file mode 100644 index 0000000000000..f1eae16f49600 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr new file mode 100644 index 0000000000000..f451f7b16aee5 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr @@ -0,0 +1,13 @@ +error: Invalid genesis builder: expected `GenesisBuild` or `GenesisBuild` + --> $DIR/genesis_invalid_generic.rs:19:7 + | +19 | impl GenesisBuild for GenesisConfig {} + | ^^^^^^^^^^^^ + +error: expected `<` + --> $DIR/genesis_invalid_generic.rs:1:1 + | +1 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/pallet_ui/genesis_wrong_name.rs b/frame/support/test/tests/pallet_ui/genesis_wrong_name.rs new file mode 100644 index 0000000000000..5e8b297ba4ccf --- /dev/null +++ b/frame/support/test/tests/pallet_ui/genesis_wrong_name.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::genesis_build] + impl Foo {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/genesis_wrong_name.stderr b/frame/support/test/tests/pallet_ui/genesis_wrong_name.stderr new file mode 100644 index 0000000000000..dd2e65588f56b --- /dev/null +++ 
b/frame/support/test/tests/pallet_ui/genesis_wrong_name.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::genesis_build, expected impl<..> GenesisBuild<..> for GenesisConfig<..> + --> $DIR/genesis_wrong_name.rs:19:2 + | +19 | impl Foo {} + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.rs b/frame/support/test/tests/pallet_ui/hooks_invalid_item.rs new file mode 100644 index 0000000000000..7c66b3e6cecc1 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.rs @@ -0,0 +1,19 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr new file mode 100644 index 0000000000000..0379448f694fc --- /dev/null +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -0,0 +1,5 @@ +error[E0107]: wrong number of type arguments: expected 1, found 0 + --> $DIR/hooks_invalid_item.rs:12:18 + | +12 | impl Hooks for Pallet {} + | ^^^^^ expected 1 type argument diff --git a/frame/support/test/tests/pallet_ui/inconsistent_instance_1.rs b/frame/support/test/tests/pallet_ui/inconsistent_instance_1.rs new file mode 100644 index 0000000000000..00b57a01235c3 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inconsistent_instance_1.rs @@ -0,0 +1,20 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/inconsistent_instance_1.stderr b/frame/support/test/tests/pallet_ui/inconsistent_instance_1.stderr new file mode 100644 index 0000000000000..352c21013cab0 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inconsistent_instance_1.stderr @@ -0,0 +1,29 @@ +error: Invalid generic declaration, trait is defined with instance but generic use none + --> $DIR/inconsistent_instance_1.rs:16:7 + | +16 | impl Pallet {} + | ^ + +error: Invalid generic declaration, trait is defined with instance but generic use none + --> $DIR/inconsistent_instance_1.rs:16:18 + | +16 | impl Pallet {} + | ^^^^^^ + +error: Invalid generic declaration, trait is defined with instance but generic use none + --> $DIR/inconsistent_instance_1.rs:10:20 + | +10 | pub struct Pallet(core::marker::PhantomData); + | ^ + +error: Invalid generic declaration, trait is defined with instance but generic use none + --> $DIR/inconsistent_instance_1.rs:13:47 + | +13 | impl Hooks> for Pallet {} + | ^^^^^^ + +error: Invalid generic declaration, trait is defined with instance but generic use none + --> $DIR/inconsistent_instance_1.rs:13:7 + | +13 | impl Hooks> for Pallet {} + | ^ diff --git a/frame/support/test/tests/pallet_ui/inconsistent_instance_2.rs b/frame/support/test/tests/pallet_ui/inconsistent_instance_2.rs new file mode 100644 index 0000000000000..e7b51cb5ebef5 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inconsistent_instance_2.rs @@ -0,0 +1,20 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use 
frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet {} + + #[pallet::call] + impl, I: 'static> Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/inconsistent_instance_2.stderr b/frame/support/test/tests/pallet_ui/inconsistent_instance_2.stderr new file mode 100644 index 0000000000000..9f5d3c740cbd1 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inconsistent_instance_2.stderr @@ -0,0 +1,29 @@ +error: Invalid generic declaration, trait is defined without instance but generic use some + --> $DIR/inconsistent_instance_2.rs:16:7 + | +16 | impl, I: 'static> Pallet {} + | ^ + +error: Invalid generic declaration, trait is defined without instance but generic use some + --> $DIR/inconsistent_instance_2.rs:16:33 + | +16 | impl, I: 'static> Pallet {} + | ^^^^^^ + +error: Invalid generic declaration, trait is defined without instance but generic use some + --> $DIR/inconsistent_instance_2.rs:10:20 + | +10 | pub struct Pallet(core::marker::PhantomData<(T, I)>); + | ^ + +error: Invalid generic declaration, trait is defined without instance but generic use some + --> $DIR/inconsistent_instance_2.rs:13:62 + | +13 | impl, I: 'static> Hooks> for Pallet {} + | ^^^^^^ + +error: Invalid generic declaration, trait is defined without instance but generic use some + --> $DIR/inconsistent_instance_2.rs:13:7 + | +13 | impl, I: 'static> Hooks> for Pallet {} + | ^ diff --git a/frame/support/test/tests/pallet_ui/inherent_check_inner_span.rs b/frame/support/test/tests/pallet_ui/inherent_check_inner_span.rs new file mode 100644 index 0000000000000..9704a7e1a442e --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inherent_check_inner_span.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, ProvideInherent}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::inherent] + impl ProvideInherent for Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr b/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr new file mode 100644 index 0000000000000..75a522889ebd9 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr @@ -0,0 +1,10 @@ +error[E0046]: not all trait items implemented, missing: `Call`, `Error`, `INHERENT_IDENTIFIER`, `create_inherent` + --> $DIR/inherent_check_inner_span.rs:19:2 + | +19 | impl ProvideInherent for Pallet {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `Call`, `Error`, `INHERENT_IDENTIFIER`, `create_inherent` in implementation + | + = help: implement the missing item: `type Call = Type;` + = help: implement the missing item: `type Error = Type;` + = help: implement the missing item: `const INHERENT_IDENTIFIER: [u8; 8] = value;` + = help: implement the missing item: `fn create_inherent(_: &InherentData) -> std::option::Option<::Call> { todo!() }` diff --git a/frame/support/test/tests/pallet_ui/inherent_invalid_item.rs b/frame/support/test/tests/pallet_ui/inherent_invalid_item.rs new file mode 100644 index 0000000000000..97eda44721307 --- /dev/null +++ 
b/frame/support/test/tests/pallet_ui/inherent_invalid_item.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::inherent] + impl Foo {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/inherent_invalid_item.stderr b/frame/support/test/tests/pallet_ui/inherent_invalid_item.stderr new file mode 100644 index 0000000000000..b62b1234bdeb0 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inherent_invalid_item.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..> + --> $DIR/inherent_invalid_item.rs:19:2 + | +19 | impl Foo {} + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/mod_not_inlined.rs b/frame/support/test/tests/pallet_ui/mod_not_inlined.rs new file mode 100644 index 0000000000000..c74c7f5ef2a2b --- /dev/null +++ b/frame/support/test/tests/pallet_ui/mod_not_inlined.rs @@ -0,0 +1,5 @@ +#[frame_support::pallet] +mod foo; + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/mod_not_inlined.stderr b/frame/support/test/tests/pallet_ui/mod_not_inlined.stderr new file mode 100644 index 0000000000000..9ad93939d8c00 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/mod_not_inlined.stderr @@ -0,0 +1,13 @@ +error[E0658]: non-inline modules in proc macro input are unstable + --> $DIR/mod_not_inlined.rs:2:1 + | +2 | mod foo; + | ^^^^^^^^ + | + = note: see issue #54727 for more information + +error: Invalid pallet definition, expected mod to be inlined. 
+ --> $DIR/mod_not_inlined.rs:2:1 + | +2 | mod foo; + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_incomplete_item.rs b/frame/support/test/tests/pallet_ui/storage_incomplete_item.rs new file mode 100644 index 0000000000000..e451df8c78a02 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_incomplete_item.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + type Foo; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_incomplete_item.stderr b/frame/support/test/tests/pallet_ui/storage_incomplete_item.stderr new file mode 100644 index 0000000000000..57f3ab78a5382 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_incomplete_item.stderr @@ -0,0 +1,13 @@ +error: free type alias without body + --> $DIR/storage_incomplete_item.rs:19:2 + | +19 | type Foo; + | ^^^^^^^^- + | | + | help: provide a definition for the type: `= ;` + +error[E0433]: failed to resolve: use of undeclared crate or module `pallet` + --> $DIR/storage_incomplete_item.rs:18:4 + | +18 | #[pallet::storage] + | ^^^^^^ use of undeclared crate or module `pallet` diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.rs b/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.rs new file mode 100644 index 0000000000000..c8df93c9b323d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + type Foo = StorageValue; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.stderr b/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.stderr new file mode 100644 index 0000000000000..d332e6c2d3d1b --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.stderr @@ -0,0 +1,11 @@ +error: Invalid use of `#[pallet::storage]`, the type first generic argument must be `_`, the final argument is automatically set by macro. 
+ --> $DIR/storage_invalid_first_generic.rs:19:29 + | +19 | type Foo = StorageValue; + | ^^ + +error: expected `_` + --> $DIR/storage_invalid_first_generic.rs:19:29 + | +19 | type Foo = StorageValue; + | ^^ diff --git a/frame/support/test/tests/pallet_ui/storage_not_storage_type.rs b/frame/support/test/tests/pallet_ui/storage_not_storage_type.rs new file mode 100644 index 0000000000000..03eee6fc8ec7d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_not_storage_type.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + type Foo = u8; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr new file mode 100644 index 0000000000000..ec4bde22ac7a8 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `StorageDoubleMap` in order to expand metadata, found `u8` + --> $DIR/storage_not_storage_type.rs:19:16 + | +19 | type Foo = u8; + | ^^ diff --git a/frame/support/test/tests/pallet_ui/storage_value_no_generic.rs b/frame/support/test/tests/pallet_ui/storage_value_no_generic.rs new file mode 100644 index 0000000000000..e62bdafaa2643 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_value_no_generic.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + type Foo = StorageValue; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_value_no_generic.stderr b/frame/support/test/tests/pallet_ui/storage_value_no_generic.stderr new file mode 100644 index 0000000000000..894f7095b2b5a --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_value_no_generic.stderr @@ -0,0 +1,5 @@ +error: pallet::storage unexpected number of generic argument, expected at least 2 args, found none + --> $DIR/storage_value_no_generic.rs:19:16 + | +19 | type Foo = StorageValue; + | ^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_wrong_item.rs b/frame/support/test/tests/pallet_ui/storage_wrong_item.rs new file mode 100644 index 0000000000000..56c4b86f2b35a --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_wrong_item.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + impl Foo {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_wrong_item.stderr b/frame/support/test/tests/pallet_ui/storage_wrong_item.stderr new file mode 100644 index 
0000000000000..8cc180b5bfe49 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_wrong_item.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, expected item type + --> $DIR/storage_wrong_item.rs:19:2 + | +19 | impl Foo {} + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs b/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs new file mode 100644 index 0000000000000..3ebd1cb9fa608 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs @@ -0,0 +1,25 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + use frame_support::pallet_prelude::StorageValue; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + #[pallet::generate_store(pub trait Store)] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + type Foo = StorageValue<_, u8>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr b/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr new file mode 100644 index 0000000000000..d8c62faa303ee --- /dev/null +++ b/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr @@ -0,0 +1,8 @@ +error[E0446]: private type `_GeneratedPrefixForStorageFoo` in public interface + --> $DIR/store_trait_leak_private.rs:11:37 + | +11 | #[pallet::generate_store(pub trait Store)] + | ^^^^^ can't leak private type +... +20 | #[pallet::storage] + | ------- `_GeneratedPrefixForStorageFoo` declared as private diff --git a/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.rs b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.rs new file mode 100644 index 0000000000000..ce599d5a31e71 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + type U; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.stderr new file mode 100644 index 0000000000000..16c3531140eaa --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.stderr @@ -0,0 +1,11 @@ +error: Invalid usage of `#[pallet::constant]`, syntax must be `type $SomeIdent: Get<$SomeType>;` + --> $DIR/trait_constant_invalid_bound.rs:9:3 + | +9 | type U; + | ^^^^ + +error: expected `:` + --> $DIR/trait_constant_invalid_bound.rs:9:9 + | +9 | type U; + | ^ diff --git a/frame/support/test/tests/pallet_ui/trait_invalid_item.rs b/frame/support/test/tests/pallet_ui/trait_invalid_item.rs new file mode 100644 index 0000000000000..8537659dcd037 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_invalid_item.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + const U: u8 = 3; + } + + #[pallet::pallet] + pub 
struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/trait_invalid_item.stderr b/frame/support/test/tests/pallet_ui/trait_invalid_item.stderr new file mode 100644 index 0000000000000..72495d94b3079 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_invalid_item.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::constant in pallet::config, expected type trait item + --> $DIR/trait_invalid_item.rs:9:3 + | +9 | const U: u8 = 3; + | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/trait_no_supertrait.rs b/frame/support/test/tests/pallet_ui/trait_no_supertrait.rs new file mode 100644 index 0000000000000..0fc987f7bbdd7 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_no_supertrait.rs @@ -0,0 +1,21 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config { + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/trait_no_supertrait.stderr b/frame/support/test/tests/pallet_ui/trait_no_supertrait.stderr new file mode 100644 index 0000000000000..c38f43d28eb33 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_no_supertrait.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::trait, expected explicit `frame_system::Config` as supertrait, found none. (try `pub trait Config: frame_system::Config { ...` or `pub trait Config: frame_system::Config { ...`). To disable this check, use `#[pallet::disable_frame_system_supertrait_check]` + --> $DIR/trait_no_supertrait.rs:7:2 + | +7 | pub trait Config { + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/type_value_error_in_block.rs b/frame/support/test/tests/pallet_ui/type_value_error_in_block.rs new file mode 100644 index 0000000000000..a13e1c7c5c2d2 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_error_in_block.rs @@ -0,0 +1,25 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::type_value] fn Foo() -> u32 { + // Just wrong code to see span + u32::new() + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr b/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr new file mode 100644 index 0000000000000..f46b89a067b06 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr @@ -0,0 +1,5 @@ +error[E0599]: no function or associated item named `new` found for type `u32` in the current scope + --> $DIR/type_value_error_in_block.rs:20:8 + | +20 | u32::new() + | ^^^ function or associated item not found in `u32` diff --git a/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.rs b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.rs new file mode 100644 index 0000000000000..b04d8b894676d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.rs @@ -0,0 +1,28 @@ +#[frame_support::pallet] +mod pallet { + use 
frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config + where ::AccountId: From + {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet + where ::AccountId: From + {} + + #[pallet::call] + impl Pallet + where ::AccountId: From + {} + + #[pallet::type_value] fn Foo() -> u32 { 3u32 } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr new file mode 100644 index 0000000000000..85d7342b253d4 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr @@ -0,0 +1,47 @@ +error[E0277]: the trait bound `::AccountId: From` is not satisfied + --> $DIR/type_value_forgotten_where_clause.rs:24:34 + | +7 | pub trait Config: frame_system::Config + | ------ required by a bound in this +8 | where ::AccountId: From + | --------- required by this bound in `pallet::Config` +... +24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } + | ^^^^^^ the trait `From` is not implemented for `::AccountId` + | +help: consider further restricting the associated type + | +24 | #[pallet::type_value] fn Foo() -> u32 where ::AccountId: From { 3u32 } + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `::AccountId: From` is not satisfied + --> $DIR/type_value_forgotten_where_clause.rs:24:12 + | +7 | pub trait Config: frame_system::Config + | ------ required by a bound in this +8 | where ::AccountId: From + | --------- required by this bound in `pallet::Config` +... +24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } + | ^^^^^^^^^^ the trait `From` is not implemented for `::AccountId` + | +help: consider further restricting the associated type + | +24 | #[pallet::type_value where ::AccountId: From] fn Foo() -> u32 { 3u32 } + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `::AccountId: From` is not satisfied + --> $DIR/type_value_forgotten_where_clause.rs:24:12 + | +7 | pub trait Config: frame_system::Config + | ------ required by a bound in this +8 | where ::AccountId: From + | --------- required by this bound in `pallet::Config` +... 
+24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } + | ^^^^^^^^^^ the trait `From` is not implemented for `::AccountId` + | +help: consider further restricting the associated type + | +24 | #[pallet::type_value] fn Foo() -> u32 where ::AccountId: From { 3u32 } + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/type_value_invalid_item.rs b/frame/support/test/tests/pallet_ui/type_value_invalid_item.rs new file mode 100644 index 0000000000000..1b6c975b09ed1 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_invalid_item.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, PhantomData}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::type_value] struct Foo; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/type_value_invalid_item.stderr b/frame/support/test/tests/pallet_ui/type_value_invalid_item.stderr new file mode 100644 index 0000000000000..5ae618df8837c --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_invalid_item.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::type_value, expected item fn + --> $DIR/type_value_invalid_item.rs:18:24 + | +18 | #[pallet::type_value] struct Foo; + | ^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/type_value_no_return.rs b/frame/support/test/tests/pallet_ui/type_value_no_return.rs new file mode 100644 index 0000000000000..82eb3b17d0393 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_no_return.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, PhantomData}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::type_value] fn Foo() {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/type_value_no_return.stderr b/frame/support/test/tests/pallet_ui/type_value_no_return.stderr new file mode 100644 index 0000000000000..65ac0243f9f64 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_no_return.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::type_value, expected return type + --> $DIR/type_value_no_return.rs:18:24 + | +18 | #[pallet::type_value] fn Foo() {} + | ^^ diff --git a/frame/support/test/tests/pallet_version.rs b/frame/support/test/tests/pallet_version.rs index d6293ac6a308a..4cc93d395db2a 100644 --- a/frame/support/test/tests/pallet_version.rs +++ b/frame/support/test/tests/pallet_version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,29 +20,24 @@ #![recursion_limit="128"] use codec::{Decode, Encode}; -use sp_runtime::{generic, traits::{BlakeTwo256, Block as _, Verify}, BuildStorage}; +use sp_runtime::{generic, traits::{BlakeTwo256, Verify}, BuildStorage}; use frame_support::{ traits::{PALLET_VERSION_STORAGE_KEY_POSTFIX, PalletVersion, OnRuntimeUpgrade, GetPalletVersion}, crate_to_pallet_version, weights::Weight, }; use sp_core::{H256, sr25519}; -mod system; - /// A version that we will check for in the tests const SOME_TEST_VERSION: PalletVersion = PalletVersion { major: 3000, minor: 30, patch: 13 }; /// Checks that `on_runtime_upgrade` sets the latest pallet version when being called without /// being provided by the user. mod module1 { - use super::*; - - pub trait Trait: system::Trait {} + pub trait Config: frame_system::Config {} frame_support::decl_module! { - pub struct Module for enum Call where - origin: ::Origin, - system = system, + pub struct Module for enum Call where + origin: ::Origin, {} } } @@ -52,12 +47,11 @@ mod module1 { mod module2 { use super::*; - pub trait Trait: system::Trait {} + pub trait Config: frame_system::Config {} frame_support::decl_module! { - pub struct Module, I: Instance=DefaultInstance> for enum Call where - origin: ::Origin, - system = system + pub struct Module, I: Instance=DefaultInstance> for enum Call where + origin: ::Origin, { fn on_runtime_upgrade() -> Weight { assert_eq!(crate_to_pallet_version!(), Self::current_version()); @@ -78,30 +72,100 @@ mod module2 { } frame_support::decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Module2 {} + trait Store for Module, I: Instance=DefaultInstance> as Module2 {} + } +} + +#[frame_support::pallet] +mod pallet3 { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_runtime_upgrade() -> Weight { + return 3; + } + } + + #[pallet::call] + impl Pallet { + } +} + +#[frame_support::pallet] +mod pallet4 { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + } + + #[pallet::pallet] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + fn on_runtime_upgrade() -> Weight { + return 3; + } + } + + #[pallet::call] + impl, I: 'static> Pallet { } } -impl module1::Trait for Runtime {} -impl module2::Trait for Runtime {} -impl module2::Trait for Runtime {} -impl module2::Trait for Runtime {} +impl module1::Config for Runtime {} +impl module2::Config for Runtime {} +impl module2::Config for Runtime {} +impl module2::Config for Runtime {} + +impl pallet3::Config for Runtime {} +impl pallet4::Config for Runtime {} +impl pallet4::Config for Runtime {} +impl pallet4::Config for Runtime {} pub type Signature = sr25519::Signature; pub type AccountId = ::Signer; pub type BlockNumber = u64; pub type Index = u64; -impl system::Trait for Runtime { - type BaseCallFilter= (); - type Hash = H256; +frame_support::parameter_types!( + pub const BlockHashCount: u32 = 250; +); + +impl frame_system::Config for Runtime { + type BaseCallFilter = (); type Origin = Origin; + type Index = u64; type BlockNumber = BlockNumber; + type Call = Call; + type Hash = H256; + type Hashing = 
sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; type Event = Event; - type PalletInfo = PalletInfo; - type Call = Call; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); } frame_support::construct_runtime!( @@ -110,11 +174,15 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, + System: frame_system::{Module, Call, Event}, Module1: module1::{Module, Call}, Module2: module2::{Module, Call}, Module2_1: module2::::{Module, Call}, Module2_2: module2::::{Module, Call}, + Pallet3: pallet3::{Module, Call}, + Pallet4: pallet4::{Module, Call}, + Pallet4_1: pallet4::::{Module, Call}, + Pallet4_2: pallet4::::{Module, Call}, } ); @@ -156,6 +224,10 @@ fn on_runtime_upgrade_sets_the_pallet_versions_in_storage() { check_pallet_version("Module2"); check_pallet_version("Module2_1"); check_pallet_version("Module2_2"); + check_pallet_version("Pallet3"); + check_pallet_version("Pallet4"); + check_pallet_version("Pallet4_1"); + check_pallet_version("Pallet4_2"); }); } @@ -171,6 +243,10 @@ fn on_runtime_upgrade_overwrites_old_version() { check_pallet_version("Module2"); check_pallet_version("Module2_1"); check_pallet_version("Module2_2"); + check_pallet_version("Pallet3"); + check_pallet_version("Pallet4"); + check_pallet_version("Pallet4_1"); + check_pallet_version("Pallet4_2"); }); } @@ -183,6 +259,10 @@ fn genesis_init_puts_pallet_version_into_storage() { check_pallet_version("Module2"); check_pallet_version("Module2_1"); check_pallet_version("Module2_2"); + check_pallet_version("Pallet3"); + check_pallet_version("Pallet4"); + check_pallet_version("Pallet4_1"); + check_pallet_version("Pallet4_2"); let system_version = System::storage_version().expect("System version should be set"); assert_eq!(System::current_version(), system_version); diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs new file mode 100644 index 0000000000000..b09beb04cd17c --- /dev/null +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -0,0 +1,150 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub trait Trait: frame_system::Config { + type Balance: frame_support::dispatch::Parameter; + /// The overarching event type. + type Event: From> + Into<::Event>; +} + +frame_support::decl_storage! 
{ + trait Store for Module as Example { + Dummy get(fn dummy) config(): Option; + } +} + +frame_support::decl_event!( + pub enum Event where B = ::Balance { + Dummy(B), + } +); + +frame_support::decl_error!( + pub enum Error for Module { + Dummy, + } +); + +frame_support::decl_module! { + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + type Error = Error; + const Foo: u32 = u32::max_value(); + + #[weight = 0] + fn accumulate_dummy(origin, increase_by: T::Balance) { + unimplemented!(); + } + + fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { + 0 + } + } +} + +impl sp_runtime::traits::ValidateUnsigned for Module { + type Call = Call; + + fn validate_unsigned( + _source: sp_runtime::transaction_validity::TransactionSource, + _call: &Self::Call, + ) -> sp_runtime::transaction_validity::TransactionValidity { + unimplemented!(); + } +} + +pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"12345678"; + +impl sp_inherents::ProvideInherent for Module { + type Call = Call; + type Error = sp_inherents::MakeFatalError; + const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_data: &sp_inherents::InherentData) -> Option { + unimplemented!(); + } + + fn check_inherent(_: &Self::Call, _: &sp_inherents::InherentData) -> std::result::Result<(), Self::Error> { + unimplemented!(); + } +} + +#[cfg(test)] +mod tests { + use crate as pallet_test; + + use frame_support::parameter_types; + + type SignedExtra = ( + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + ); + type TestBlock = sp_runtime::generic::Block; + type TestHeader = sp_runtime::generic::Header; + type TestUncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< + ::AccountId, + ::Call, + (), + SignedExtra, + >; + + frame_support::construct_runtime!( + pub enum Runtime where + Block = TestBlock, + NodeBlock = TestBlock, + UncheckedExtrinsic = TestUncheckedExtrinsic + { + System: frame_system::{Module, Call, Config, Storage, Event}, + PalletTest: pallet_test::{Module, Call, Storage, Event, Config, ValidateUnsigned, Inherent}, + } + ); + + parameter_types! { + pub const BlockHashCount: u64 = 250; + } + + impl frame_system::Config for Runtime { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = sp_core::H256; + type Call = Call; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = TestHeader; + type Event = (); + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + } + + impl pallet_test::Trait for Runtime { + type Balance = u32; + type Event = (); + } +} diff --git a/frame/support/test/tests/reserved_keyword.rs b/frame/support/test/tests/reserved_keyword.rs index 382b2e498741f..d29b0477c3836 100644 --- a/frame/support/test/tests/reserved_keyword.rs +++ b/frame/support/test/tests/reserved_keyword.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
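
Because the flattened diff above drops angle brackets, the key excerpt of the new `pallet_with_name_trait_is_valid.rs` test is repeated here with its generics restored (a reconstruction, not new code): a `decl_module!`-era configuration trait may keep its legacy `Trait` name as long as it bounds on the renamed `frame_system::Config`, and the runtime wires it up exactly as before.

pub trait Trait: frame_system::Config {
	type Balance: frame_support::dispatch::Parameter;
	/// The overarching event type.
	type Event: From<Event<Self>> + Into<<Self as frame_system::Config>::Event>;
}
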
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #[test] fn reserved_keyword() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/reserved_keyword/*.rs"); diff --git a/frame/support/test/tests/reserved_keyword/on_initialize.rs b/frame/support/test/tests/reserved_keyword/on_initialize.rs index 781b72bd04e8c..72d53abfb1034 100644 --- a/frame/support/test/tests/reserved_keyword/on_initialize.rs +++ b/frame/support/test/tests/reserved_keyword/on_initialize.rs @@ -4,7 +4,7 @@ macro_rules! reserved { mod $reserved { pub use frame_support::dispatch; - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} pub mod system { use frame_support::dispatch; @@ -15,7 +15,7 @@ macro_rules! reserved { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { #[weight = 0] fn $reserved(_origin) -> dispatch::DispatchResult { unreachable!() } } diff --git a/frame/support/test/tests/storage_transaction.rs b/frame/support/test/tests/storage_transaction.rs index 5c687ef05005d..b518c60e957c6 100644 --- a/frame/support/test/tests/storage_transaction.rs +++ b/frame/support/test/tests/storage_transaction.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,10 +22,10 @@ use frame_support::{ use sp_io::TestExternalities; use sp_std::result; -pub trait Trait: frame_support_test::Trait {} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { #[weight = 0] #[transactional] fn value_commits(_origin, v: u32) { @@ -42,7 +42,7 @@ frame_support::decl_module! { } frame_support::decl_storage!{ - trait Store for Module as StorageTransactions { + trait Store for Module as StorageTransactions { pub Value: u32; pub Map: map hasher(twox_64_concat) String => u32; } @@ -50,14 +50,14 @@ frame_support::decl_storage!{ struct Runtime; -impl frame_support_test::Trait for Runtime { +impl frame_support_test::Config for Runtime { type Origin = u32; type BlockNumber = u32; - type PalletInfo = (); + type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); } -impl Trait for Runtime {} +impl Config for Runtime {} #[test] fn storage_transaction_basic_commit() { @@ -195,7 +195,8 @@ fn transactional_annotation() { #[transactional] fn value_rollbacks(v: u32) -> result::Result { set_value(v)?; - Err("nah") + Err("nah")?; + Ok(v) } TestExternalities::default().execute_with(|| { diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs index f30b6e4c2af9d..19858731b3a09 100644 --- a/frame/support/test/tests/system.rs +++ b/frame/support/test/tests/system.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
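
The `transactional_annotation` test above exercises storage rollback. A self-contained sketch of the same behaviour, assuming only the `transactional` attribute re-exported by `frame_support` and a raw `sp_io` write in place of the test's `set_value` helper (run it inside `TestExternalities`):

#[frame_support::transactional]
fn value_rollbacks(v: u32) -> Result<u32, &'static str> {
	// The write below happens inside a storage transaction layer...
	sp_io::storage::set(b":demo", &v.to_le_bytes());
	// ...and is discarded again when the error propagates out of the function.
	Err("nah")?;
	Ok(v)
}
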
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ use frame_support::{ codec::{Encode, Decode, EncodeLike}, traits::Get, weights::RuntimeDbWeight, }; -pub trait Trait: 'static + Eq + Clone { +pub trait Config: 'static + Eq + Clone { type Origin: Into, Self::Origin>> + From>; @@ -34,18 +34,18 @@ pub trait Trait: 'static + Eq + Clone { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { #[weight = 0] fn noop(origin) {} } } -impl Module { +impl Module { pub fn deposit_event(_event: impl Into) {} } frame_support::decl_event!( - pub enum Event where BlockNumber = ::BlockNumber { + pub enum Event where BlockNumber = ::BlockNumber { ExtrinsicSuccess, ExtrinsicFailed, Ignore(BlockNumber), @@ -53,7 +53,7 @@ frame_support::decl_event!( ); frame_support::decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Test error documentation TestError, /// Error documentation @@ -79,7 +79,7 @@ impl From> for RawOrigin { } } -pub type Origin = RawOrigin<::AccountId>; +pub type Origin = RawOrigin<::AccountId>; #[allow(dead_code)] pub fn ensure_root(o: OuterOrigin) -> Result<(), &'static str> diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index cebf761a907c7..c4530e9dfd09f 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", path = "../../primitives/io", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -impl-trait-for-tuples = "0.1.3" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", path = "../../primitives/io", default-features = false } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-version = { version = "3.0.0", default-features = false, path = "../../primitives/version" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +impl-trait-for-tuples = "0.2.1" [dev-dependencies] criterion = "0.3.3" -sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } +sp-externalities = { version = "0.9.0", path = "../../primitives/externalities" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } [features] diff --git a/frame/system/README.md b/frame/system/README.md index 
adfa7aa35ddda..106a16bc209d6 100644 --- a/frame/system/README.md +++ b/frame/system/README.md @@ -57,10 +57,10 @@ Import the System module and derive your module's configuration trait from the s use frame_support::{decl_module, dispatch}; use frame_system::{self as system, ensure_signed}; -pub trait Trait: system::Trait {} +pub trait Config: system::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn system_module_example(origin) -> dispatch::DispatchResult { let _sender = ensure_signed(origin)?; @@ -72,4 +72,4 @@ decl_module! { } ``` -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 00c965136c0d0..6ed3d456826c2 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,19 +17,19 @@ use criterion::{Criterion, criterion_group, criterion_main, black_box}; use frame_system as system; -use frame_support::{decl_module, decl_event, impl_outer_origin, impl_outer_event, weights::Weight}; +use frame_support::{decl_module, decl_event}; use sp_core::H256; use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; mod module { use super::*; - pub trait Trait: system::Trait { - type Event: From + Into<::Event>; + pub trait Config: system::Config { + type Event: From + Into<::Event>; } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { pub fn deposit_event() = default; } } @@ -41,31 +41,40 @@ mod module { ); } -impl_outer_origin!{ - pub enum Origin for Runtime {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -impl_outer_event! { - pub enum Event for Runtime { - system, - module, +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Module: module::{Module, Call, Event}, } -} +); frame_support::parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::with_sensible_defaults( + 4 * 1024 * 1024, Perbill::from_percent(75), + ); + pub BlockLength: frame_system::limits::BlockLength = + frame_system::limits::BlockLength::max_with_normal_ratio( + 4 * 1024 * 1024, Perbill::from_percent(75), + ); } -#[derive(Clone, Eq, PartialEq)] -pub struct Runtime; -impl system::Trait for Runtime { +impl system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = BlockLength; + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; @@ -73,22 +82,16 @@ impl system::Trait for Runtime { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } -impl module::Trait for Runtime { +impl module::Config for Runtime { type Event = Event; } diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index 26b9bd9230e00..ddf52c96effe6 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system-benchmarking" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../benchmarking" } -frame-system = { version = "2.0.0", default-features = false, path = "../../system" } -frame-support = { version = "2.0.0", default-features = false, path = "../../support" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../../benchmarking" } +frame-system = { version = "3.0.0", default-features = false, path = "../../system" } +frame-support = { version = "3.0.0", default-features = false, path = "../../support" } +sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } [dev-dependencies] serde = { version = "1.0.101" } -sp-io ={ version = "2.0.0", path = 
"../../../primitives/io" } +sp-io ={ version = "3.0.0", path = "../../../primitives/io" } [features] default = ["std"] diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index b631d00e47c50..9ff749950ab5e 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,20 +25,21 @@ use sp_std::prelude::*; use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; use sp_runtime::traits::Hash; use frame_benchmarking::{benchmarks, whitelisted_caller}; -use frame_support::traits::Get; -use frame_support::storage::{self, StorageMap}; -use frame_system::{Module as System, Call, RawOrigin, DigestItemOf, AccountInfo}; +use frame_support::{ + storage, + traits::Get, + weights::DispatchClass, +}; +use frame_system::{Module as System, Call, RawOrigin, DigestItemOf}; mod mock; -pub struct Module(System); -pub trait Trait: frame_system::Trait {} +pub struct Module(System); +pub trait Config: frame_system::Config {} benchmarks! { - _ { } - remark { - let b in 0 .. T::MaximumBlockLength::get(); + let b in 0 .. *T::BlockLength::get().max.get(DispatchClass::Normal) as u32; let remark_message = vec![1; b as usize]; let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), remark_message) @@ -135,22 +136,6 @@ benchmarks! { verify { assert_eq!(storage::unhashed::get_raw(&last_key), None); } - - suicide { - let caller: T::AccountId = whitelisted_caller(); - let account_info = AccountInfo:: { - nonce: 1337u32.into(), - refcount: 0, - data: T::AccountData::default() - }; - frame_system::Account::::insert(&caller, account_info); - let new_account_info = System::::account(caller.clone()); - assert_eq!(new_account_info.nonce, 1337u32.into()); - }: _(RawOrigin::Signed(caller.clone())) - verify { - let account_info = System::::account(&caller); - assert_eq!(account_info.nonce, 0u32.into()); - } } #[cfg(test)] @@ -169,7 +154,6 @@ mod tests { assert_ok!(test_benchmark_set_storage::()); assert_ok!(test_benchmark_kill_storage::()); assert_ok!(test_benchmark_kill_prefix::()); - assert_ok!(test_benchmark_suicide::()); }); } } diff --git a/frame/system/benchmarking/src/mock.rs b/frame/system/benchmarking/src/mock.rs index 33255d7b50e19..edc5dfebbd106 100644 --- a/frame/system/benchmarking/src/mock.rs +++ b/frame/system/benchmarking/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,38 +20,29 @@ #![cfg(test)] use sp_runtime::traits::IdentityLookup; -use frame_support::{ - impl_outer_origin, - dispatch::{Dispatchable, DispatchInfo, PostDispatchInfo}, -}; type AccountId = u64; type AccountIndex = u32; type BlockNumber = u64; -impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} -} - -#[derive(Debug, codec::Encode, codec::Decode)] -pub struct Call; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -impl Dispatchable for Call { - type Origin = (); - type Trait = (); - type Info = DispatchInfo; - type PostInfo = PostDispatchInfo; - fn dispatch(self, _origin: Self::Origin) - -> sp_runtime::DispatchResultWithInfo { - panic!("Do not use dummy implementation for dispatch."); +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, } -} +); -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct Test; - -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -61,24 +52,18 @@ impl frame_system::Trait for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = (); + type Event = Event; type BlockHashCount = (); - type MaximumBlockWeight = (); - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } -impl crate::Trait for Test {} +impl crate::Config for Test {} pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); diff --git a/frame/system/rpc/runtime-api/Cargo.toml b/frame/system/rpc/runtime-api/Cargo.toml index d00094364e3e8..56619d59ddcad 100644 --- a/frame/system/rpc/runtime-api/Cargo.toml +++ b/frame/system/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system-rpc-runtime-api" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,8 +13,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +sp-api = { version = "3.0.0", default-features = false, path = "../../../../primitives/api" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } [features] default = ["std"] diff --git a/frame/system/rpc/runtime-api/src/lib.rs b/frame/system/rpc/runtime-api/src/lib.rs index 0ead94aabe016..319883c36d748 100644 --- a/frame/system/rpc/runtime-api/src/lib.rs +++ b/frame/system/rpc/runtime-api/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
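
The benchmarking mock above migrates from a hand-rolled `impl_outer_origin!` plus dummy `Call` to the new `frame_system::mocking` helpers; reconstructed with the generic parameters the flattened diff lost, the pattern is roughly:

type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
type Block = frame_system::mocking::MockBlock<Test>;

frame_support::construct_runtime!(
	pub enum Test where
		Block = Block,
		NodeBlock = Block,
		UncheckedExtrinsic = UncheckedExtrinsic,
	{
		System: frame_system::{Module, Call, Config, Storage, Event<T>},
	}
);

`construct_runtime!` then generates `Origin`, `Call`, `Event` and `PalletInfo` for the mock, which is why the dummy `Dispatchable` implementation could be deleted.
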
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/system/src/extensions/check_genesis.rs b/frame/system/src/extensions/check_genesis.rs index d0a346519ca23..de635b4fb91a6 100644 --- a/frame/system/src/extensions/check_genesis.rs +++ b/frame/system/src/extensions/check_genesis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +16,7 @@ // limitations under the License. use codec::{Encode, Decode}; -use crate::{Trait, Module}; +use crate::{Config, Module}; use sp_runtime::{ traits::{SignedExtension, Zero}, transaction_validity::TransactionValidityError, @@ -24,9 +24,9 @@ use sp_runtime::{ /// Genesis hash check to provide replay protection between different networks. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckGenesis(sp_std::marker::PhantomData); +pub struct CheckGenesis(sp_std::marker::PhantomData); -impl sp_std::fmt::Debug for CheckGenesis { +impl sp_std::fmt::Debug for CheckGenesis { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckGenesis") @@ -38,16 +38,16 @@ impl sp_std::fmt::Debug for CheckGenesis { } } -impl CheckGenesis { +impl CheckGenesis { /// Creates new `SignedExtension` to check genesis hash. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckGenesis { +impl SignedExtension for CheckGenesis { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = T::Hash; type Pre = (); const IDENTIFIER: &'static str = "CheckGenesis"; diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index 7e3f65d0324d7..1e8eb32a3d3c2 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,8 +16,7 @@ // limitations under the License. use codec::{Encode, Decode}; -use crate::{Trait, Module, BlockHash}; -use frame_support::StorageMap; +use crate::{Config, Module, BlockHash}; use sp_runtime::{ generic::Era, traits::{SignedExtension, DispatchInfoOf, SaturatedConversion}, @@ -28,16 +27,16 @@ use sp_runtime::{ /// Check for transaction mortality. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckMortality(Era, sp_std::marker::PhantomData); +pub struct CheckMortality(Era, sp_std::marker::PhantomData); -impl CheckMortality { +impl CheckMortality { /// utility constructor. Used only in client/factory code. 
pub fn from(era: Era) -> Self { Self(era, sp_std::marker::PhantomData) } } -impl sp_std::fmt::Debug for CheckMortality { +impl sp_std::fmt::Debug for CheckMortality { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckMortality({:?})", self.0) @@ -49,7 +48,7 @@ impl sp_std::fmt::Debug for CheckMortality { } } -impl SignedExtension for CheckMortality { +impl SignedExtension for CheckMortality { type AccountId = T::AccountId; type Call = T::Call; type AdditionalSigned = T::Hash; @@ -111,7 +110,7 @@ mod tests { let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; let len = 0_usize; let ext = ( - crate::CheckWeight::::default(), + crate::CheckWeight::::new(), CheckMortality::::from(Era::mortal(16, 256)), ); System::set_block_number(17); diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index e7316457aaffc..bc48be925bc0d 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,11 +16,8 @@ // limitations under the License. use codec::{Encode, Decode}; -use crate::Trait; -use frame_support::{ - weights::DispatchInfo, - StorageMap, -}; +use crate::Config; +use frame_support::weights::DispatchInfo; use sp_runtime::{ traits::{SignedExtension, DispatchInfoOf, Dispatchable, One}, transaction_validity::{ @@ -35,16 +32,16 @@ use sp_std::vec; /// Note that this does not set any priority by default. Make sure that AT LEAST one of the signed /// extension sets some kind of priority upon validating transactions. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckNonce(#[codec(compact)] T::Index); +pub struct CheckNonce(#[codec(compact)] T::Index); -impl CheckNonce { +impl CheckNonce { /// utility constructor. Used only in client/factory code. pub fn from(nonce: T::Index) -> Self { Self(nonce) } } -impl sp_std::fmt::Debug for CheckNonce { +impl sp_std::fmt::Debug for CheckNonce { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckNonce({})", self.0) @@ -56,7 +53,7 @@ impl sp_std::fmt::Debug for CheckNonce { } } -impl SignedExtension for CheckNonce where +impl SignedExtension for CheckNonce where T::Call: Dispatchable { type AccountId = T::AccountId; @@ -129,7 +126,8 @@ mod tests { new_test_ext().execute_with(|| { crate::Account::::insert(1, crate::AccountInfo { nonce: 1, - refcount: 0, + consumers: 0, + providers: 0, data: 0, }); let info = DispatchInfo::default(); diff --git a/frame/system/src/extensions/check_spec_version.rs b/frame/system/src/extensions/check_spec_version.rs index 8dc4d8d9ceddc..1fd8376d342b2 100644 --- a/frame/system/src/extensions/check_spec_version.rs +++ b/frame/system/src/extensions/check_spec_version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::{Trait, Module}; +use crate::{Config, Module}; use codec::{Encode, Decode}; use sp_runtime::{ traits::SignedExtension, @@ -24,9 +24,9 @@ use sp_runtime::{ /// Ensure the runtime version registered in the transaction is the same as at present. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckSpecVersion(sp_std::marker::PhantomData); +pub struct CheckSpecVersion(sp_std::marker::PhantomData); -impl sp_std::fmt::Debug for CheckSpecVersion { +impl sp_std::fmt::Debug for CheckSpecVersion { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckSpecVersion") @@ -38,16 +38,16 @@ impl sp_std::fmt::Debug for CheckSpecVersion { } } -impl CheckSpecVersion { +impl CheckSpecVersion { /// Create new `SignedExtension` to check runtime version. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckSpecVersion { +impl SignedExtension for CheckSpecVersion { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = u32; type Pre = (); const IDENTIFIER: &'static str = "CheckSpecVersion"; diff --git a/frame/system/src/extensions/check_tx_version.rs b/frame/system/src/extensions/check_tx_version.rs index ee6f3349365b9..fa11a0a5727f1 100644 --- a/frame/system/src/extensions/check_tx_version.rs +++ b/frame/system/src/extensions/check_tx_version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Trait, Module}; +use crate::{Config, Module}; use codec::{Encode, Decode}; use sp_runtime::{ traits::SignedExtension, @@ -24,9 +24,9 @@ use sp_runtime::{ /// Ensure the transaction version registered in the transaction is the same as at present. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckTxVersion(sp_std::marker::PhantomData); +pub struct CheckTxVersion(sp_std::marker::PhantomData); -impl sp_std::fmt::Debug for CheckTxVersion { +impl sp_std::fmt::Debug for CheckTxVersion { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckTxVersion") @@ -38,16 +38,16 @@ impl sp_std::fmt::Debug for CheckTxVersion { } } -impl CheckTxVersion { +impl CheckTxVersion { /// Create new `SignedExtension` to check transaction version. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckTxVersion { +impl SignedExtension for CheckTxVersion { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = u32; type Pre = (); const IDENTIFIER: &'static str = "CheckTxVersion"; diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 39439a3e2d8ce..70116f4b6524b 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
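
The check_weight.rs rewrite that follows replaces the flat `MaximumBlockWeight`/`MaximumExtrinsicWeight`/`ExtrinsicBaseWeight`/`AvailableBlockRatio`/`BlockExecutionWeight` constants with the structured `frame_system::limits::BlockWeights`. A hedged sketch of where the old values now live (the helper and its name are illustrative):

use frame_support::{traits::Get, weights::{DispatchClass, Weight}};

fn normal_limits<T: frame_system::Config>() -> (Option<Weight>, Option<Weight>, Weight, Weight) {
	let weights = T::BlockWeights::get();
	let normal = weights.get(DispatchClass::Normal);
	(
		normal.max_total,      // was AvailableBlockRatio * MaximumBlockWeight
		normal.max_extrinsic,  // was MaximumExtrinsicWeight
		normal.base_extrinsic, // was ExtrinsicBaseWeight
		weights.max_block,     // was MaximumBlockWeight; base_block replaces BlockExecutionWeight
	)
}
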
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Trait, Module}; +use crate::{limits::BlockWeights, Config, Module}; use codec::{Encode, Decode}; use sp_runtime::{ traits::{SignedExtension, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, Printable}, @@ -23,65 +23,31 @@ use sp_runtime::{ ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, TransactionPriority, }, - Perbill, DispatchResult, + DispatchResult, }; use frame_support::{ traits::{Get}, weights::{PostDispatchInfo, DispatchInfo, DispatchClass, priority::FrameTransactionPriority}, - StorageValue, }; /// Block resource (weight) limit check. #[derive(Encode, Decode, Clone, Eq, PartialEq, Default)] -pub struct CheckWeight(sp_std::marker::PhantomData); +pub struct CheckWeight(sp_std::marker::PhantomData); -impl CheckWeight where - T::Call: Dispatchable +impl CheckWeight where + T::Call: Dispatchable, { - /// Get the quota ratio of each dispatch class type. This indicates that all operational and mandatory - /// dispatches can use the full capacity of any resource, while user-triggered ones can consume - /// a portion. - fn get_dispatch_limit_ratio(class: DispatchClass) -> Perbill { - match class { - DispatchClass::Operational | DispatchClass::Mandatory - => ::one(), - DispatchClass::Normal => T::AvailableBlockRatio::get(), - } - } - - /// Checks if the current extrinsic does not exceed `MaximumExtrinsicWeight` limit. + /// Checks if the current extrinsic does not exceed the maximum weight a single extrinsic + /// with given `DispatchClass` can have. fn check_extrinsic_weight( info: &DispatchInfoOf, ) -> Result<(), TransactionValidityError> { - match info.class { - // Mandatory transactions are included in a block unconditionally, so - // we don't verify weight. - DispatchClass::Mandatory => Ok(()), - // Normal transactions must not exceed `MaximumExtrinsicWeight`. - DispatchClass::Normal => { - let maximum_weight = T::MaximumExtrinsicWeight::get(); - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - if extrinsic_weight > maximum_weight { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(()) - } - }, - // For operational transactions we make sure it doesn't exceed - // the space alloted for `Operational` class. - DispatchClass::Operational => { - let maximum_weight = T::MaximumBlockWeight::get(); - let operational_limit = - Self::get_dispatch_limit_ratio(DispatchClass::Operational) * maximum_weight; - let operational_limit = - operational_limit.saturating_sub(T::BlockExecutionWeight::get()); - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - if extrinsic_weight > operational_limit { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(()) - } + let max = T::BlockWeights::get().get(info.class).max_extrinsic; + match max { + Some(max) if info.weight > max => { + Err(InvalidTransaction::ExhaustsResources.into()) }, + _ => Ok(()), } } @@ -90,51 +56,10 @@ impl CheckWeight where /// Upon successes, it returns the new block weight as a `Result`. fn check_block_weight( info: &DispatchInfoOf, - ) -> Result { - let maximum_weight = T::MaximumBlockWeight::get(); - let mut all_weight = Module::::block_weight(); - match info.class { - // If we have a dispatch that must be included in the block, it ignores all the limits. 
- DispatchClass::Mandatory => { - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - all_weight.add(extrinsic_weight, DispatchClass::Mandatory); - Ok(all_weight) - }, - // If we have a normal dispatch, we follow all the normal rules and limits. - DispatchClass::Normal => { - let normal_limit = Self::get_dispatch_limit_ratio(DispatchClass::Normal) * maximum_weight; - let extrinsic_weight = info.weight.checked_add(T::ExtrinsicBaseWeight::get()) - .ok_or(InvalidTransaction::ExhaustsResources)?; - all_weight.checked_add(extrinsic_weight, DispatchClass::Normal) - .map_err(|_| InvalidTransaction::ExhaustsResources)?; - if all_weight.get(DispatchClass::Normal) > normal_limit { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(all_weight) - } - }, - // If we have an operational dispatch, allow it if we have not used our full - // "operational space" (independent of existing fullness). - DispatchClass::Operational => { - let operational_limit = Self::get_dispatch_limit_ratio(DispatchClass::Operational) * maximum_weight; - let normal_limit = Self::get_dispatch_limit_ratio(DispatchClass::Normal) * maximum_weight; - let operational_space = operational_limit.saturating_sub(normal_limit); - - let extrinsic_weight = info.weight.checked_add(T::ExtrinsicBaseWeight::get()) - .ok_or(InvalidTransaction::ExhaustsResources)?; - all_weight.checked_add(extrinsic_weight, DispatchClass::Operational) - .map_err(|_| InvalidTransaction::ExhaustsResources)?; - - // If it would fit in normally, its okay - if all_weight.total() <= maximum_weight || - // If we have not used our operational space - all_weight.get(DispatchClass::Operational) <= operational_space { - Ok(all_weight) - } else { - Err(InvalidTransaction::ExhaustsResources.into()) - } - } - } + ) -> Result { + let maximum_weight = T::BlockWeights::get(); + let all_weight = Module::::block_weight(); + calculate_consumed_weight::(maximum_weight, all_weight, info) } /// Checks if the current extrinsic can fit into the block with respect to block length limits. @@ -144,19 +69,18 @@ impl CheckWeight where info: &DispatchInfoOf, len: usize, ) -> Result { + let length_limit = T::BlockLength::get(); let current_len = Module::::all_extrinsics_len(); - let maximum_len = T::MaximumBlockLength::get(); - let limit = Self::get_dispatch_limit_ratio(info.class) * maximum_len; let added_len = len as u32; let next_len = current_len.saturating_add(added_len); - if next_len > limit { + if next_len > *length_limit.max.get(info.class) { Err(InvalidTransaction::ExhaustsResources.into()) } else { Ok(next_len) } } - /// get the priority of an extrinsic denoted by `info`. + /// Get the priority of an extrinsic denoted by `info`. /// /// Operational transaction will be given a fixed initial amount to be fairly distinguished from /// the normal ones. 
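
Block length follows the same pattern as weight: it is now budgeted per dispatch class via `T::BlockLength` rather than a single `MaximumBlockLength` scaled by `AvailableBlockRatio`. A hedged sketch of the check performed by `check_block_length` above (helper name illustrative):

use frame_support::{traits::Get, weights::DispatchClass};

fn fits_in_block<T: frame_system::Config>(current_len: u32, added_len: u32, class: DispatchClass) -> bool {
	let limits = T::BlockLength::get();
	current_len.saturating_add(added_len) <= *limits.max.get(class)
}
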
@@ -190,8 +114,8 @@ impl CheckWeight where let next_weight = Self::check_block_weight(info)?; Self::check_extrinsic_weight(info)?; - crate::AllExtrinsicsLen::put(next_len); - crate::BlockWeight::put(next_weight); + crate::AllExtrinsicsLen::::put(next_len); + crate::BlockWeight::::put(next_weight); Ok(()) } @@ -213,7 +137,54 @@ impl CheckWeight where } } -impl SignedExtension for CheckWeight where +pub fn calculate_consumed_weight( + maximum_weight: BlockWeights, + mut all_weight: crate::ConsumedWeight, + info: &DispatchInfoOf, +) -> Result where + Call: Dispatchable, +{ + let extrinsic_weight = info.weight.saturating_add(maximum_weight.get(info.class).base_extrinsic); + let limit_per_class = maximum_weight.get(info.class); + + // add the weight. If class is unlimited, use saturating add instead of checked one. + if limit_per_class.max_total.is_none() && limit_per_class.reserved.is_none() { + all_weight.add(extrinsic_weight, info.class) + } else { + all_weight.checked_add(extrinsic_weight, info.class) + .map_err(|_| InvalidTransaction::ExhaustsResources)?; + } + + let per_class = *all_weight.get(info.class); + + // Check if we don't exceed per-class allowance + match limit_per_class.max_total { + Some(max) if per_class > max => { + return Err(InvalidTransaction::ExhaustsResources.into()); + }, + // There is no `max_total` limit (`None`), + // or we are below the limit. + _ => {}, + } + + // In cases total block weight is exceeded, we need to fall back + // to `reserved` pool if there is any. + if all_weight.total() > maximum_weight.max_block { + match limit_per_class.reserved { + // We are over the limit in reserved pool. + Some(reserved) if per_class > reserved => { + return Err(InvalidTransaction::ExhaustsResources.into()); + } + // There is either no limit in reserved pool (`None`), + // or we are below the limit. + _ => {}, + } + } + + Ok(all_weight) +} + +impl SignedExtension for CheckWeight where T::Call: Dispatchable { type AccountId = T::AccountId; @@ -277,7 +248,7 @@ impl SignedExtension for CheckWeight where // to them actually being useful. Block producers are thus not allowed to include mandatory // extrinsics that result in error. if let (DispatchClass::Mandatory, Err(e)) = (info.class, result) { - "Bad mandantory".print(); + "Bad mandatory".print(); e.print(); Err(InvalidTransaction::BadMandatory)? 
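
`calculate_consumed_weight` above tracks usage per dispatch class. The sketch below, using only types exercised by the tests further down, shows why a Mandatory extrinsic can legitimately push the running total past `max_block` while Normal extrinsics stay inside their class allowance:

use frame_support::weights::DispatchClass;

#[test]
fn per_class_bookkeeping_sketch() {
	// Numbers follow the tests below: max_block = 1024, Normal capped at 768 (75%).
	let consumed = frame_system::ConsumedWeight::new(|class| match class {
		DispatchClass::Normal => 768,
		DispatchClass::Operational => 0,
		DispatchClass::Mandatory => 1024,
	});
	// Mandatory weight is accounted for but never refused, so the total may
	// exceed max_block; Normal remains within its per-class max_total.
	assert_eq!(consumed.total(), 1024 + 768);
	assert_eq!(*consumed.get(DispatchClass::Normal), 768);
}
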
@@ -285,7 +256,7 @@ impl SignedExtension for CheckWeight where let unspent = post_info.calc_unspent(info); if unspent > 0 { - crate::BlockWeight::mutate(|current_weight| { + crate::BlockWeight::::mutate(|current_weight| { current_weight.sub(unspent, info.class); }) } @@ -294,7 +265,7 @@ impl SignedExtension for CheckWeight where } } -impl sp_std::fmt::Debug for CheckWeight { +impl sp_std::fmt::Debug for CheckWeight { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckWeight") @@ -315,12 +286,21 @@ mod tests { use frame_support::{assert_ok, assert_noop}; use frame_support::weights::{Weight, Pays}; + fn block_weights() -> crate::limits::BlockWeights { + ::BlockWeights::get() + } + fn normal_weight_limit() -> Weight { - ::AvailableBlockRatio::get() * ::MaximumBlockWeight::get() + block_weights().get(DispatchClass::Normal).max_total + .unwrap_or_else(|| block_weights().max_block) + } + + fn block_weight_limit() -> Weight { + block_weights().max_block } fn normal_length_limit() -> u32 { - ::AvailableBlockRatio::get() * ::MaximumBlockLength::get() + *::BlockLength::get().max.get(DispatchClass::Normal) } #[test] @@ -341,7 +321,7 @@ mod tests { check(|max, len| { assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); assert_eq!(System::block_weight().total(), Weight::max_value()); - assert!(System::block_weight().total() > ::MaximumBlockWeight::get()); + assert!(System::block_weight().total() > block_weight_limit()); }); check(|max, len| { assert_ok!(CheckWeight::::do_validate(max, len)); @@ -352,7 +332,7 @@ mod tests { fn normal_extrinsic_limited_by_maximum_extrinsic_weight() { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: ::MaximumExtrinsicWeight::get() + 1, + weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + 1, class: DispatchClass::Normal, ..Default::default() }; @@ -368,13 +348,12 @@ mod tests { #[test] fn operational_extrinsic_limited_by_operational_space_limit() { new_test_ext().execute_with(|| { - let operational_limit = CheckWeight::::get_dispatch_limit_ratio( - DispatchClass::Operational - ) * ::MaximumBlockWeight::get(); - let base_weight = ::ExtrinsicBaseWeight::get(); - let block_base = ::BlockExecutionWeight::get(); + let weights = block_weights(); + let operational_limit = weights.get(DispatchClass::Operational).max_total + .unwrap_or_else(|| weights.max_block); + let base_weight = weights.get(DispatchClass::Normal).base_extrinsic; - let weight = operational_limit - base_weight - block_base; + let weight = operational_limit - base_weight; let okay = DispatchInfo { weight, class: DispatchClass::Operational, @@ -406,7 +385,7 @@ mod tests { new_test_ext().execute_with(|| { System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Normal); assert_eq!(System::block_weight().total(), Weight::max_value()); - assert!(System::block_weight().total() > ::MaximumBlockWeight::get()); + assert!(System::block_weight().total() > block_weight_limit()); }); } @@ -426,8 +405,8 @@ mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); assert_eq!(System::block_weight().total(), 768); assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - assert_eq!(::MaximumBlockWeight::get(), 1024); - assert_eq!(System::block_weight().total(), ::MaximumBlockWeight::get()); + assert_eq!(block_weight_limit(), 1024); + assert_eq!(System::block_weight().total(), block_weight_limit()); // Checking single extrinsic should not take current block weight into account. 
assert_eq!(CheckWeight::::check_extrinsic_weight(&rest_operational), Ok(())); }); @@ -446,8 +425,8 @@ mod tests { // Extra 15 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), 266); assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(::MaximumBlockWeight::get(), 1024); - assert_eq!(System::block_weight().total(), ::MaximumBlockWeight::get()); + assert_eq!(block_weight_limit(), 1024); + assert_eq!(System::block_weight().total(), block_weight_limit()); }); } @@ -485,8 +464,8 @@ mod tests { let normal_limit = normal_weight_limit(); // given almost full block - BlockWeight::mutate(|current_weight| { - current_weight.put(normal_limit, DispatchClass::Normal) + BlockWeight::::mutate(|current_weight| { + current_weight.set(normal_limit, DispatchClass::Normal) }); // will not fit. assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); @@ -495,7 +474,7 @@ mod tests { // likewise for length limit. let len = 100_usize; - AllExtrinsicsLen::put(normal_length_limit()); + AllExtrinsicsLen::::put(normal_length_limit()); assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok()); }) @@ -528,7 +507,7 @@ mod tests { let normal = DispatchInfo::default(); let normal_limit = normal_weight_limit() as usize; let reset_check_weight = |tx, s, f| { - AllExtrinsicsLen::put(0); + AllExtrinsicsLen::::put(0); let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } }; @@ -552,19 +531,20 @@ mod tests { new_test_ext().execute_with(|| { let normal_limit = normal_weight_limit(); let small = DispatchInfo { weight: 100, ..Default::default() }; + let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; let medium = DispatchInfo { - weight: normal_limit - ::ExtrinsicBaseWeight::get(), + weight: normal_limit - base_extrinsic, ..Default::default() }; let big = DispatchInfo { - weight: normal_limit - ::ExtrinsicBaseWeight::get() + 1, + weight: normal_limit - base_extrinsic + 1, ..Default::default() }; let len = 0_usize; let reset_check_weight = |i, f, s| { - BlockWeight::mutate(|current_weight| { - current_weight.put(s, DispatchClass::Normal) + BlockWeight::::mutate(|current_weight| { + current_weight.set(s, DispatchClass::Normal) }); let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, i, len); if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } @@ -586,21 +566,23 @@ mod tests { pays_fee: Default::default(), }; let len = 0_usize; + let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; // We allow 75% for normal transaction, so we put 25% - extrinsic base weight - BlockWeight::mutate(|current_weight| { - current_weight.put(256 - ::ExtrinsicBaseWeight::get(), DispatchClass::Normal) + BlockWeight::::mutate(|current_weight| { + current_weight.set(0, DispatchClass::Mandatory); + current_weight.set(256 - base_extrinsic, DispatchClass::Normal); }); let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); - assert_eq!(BlockWeight::get().total(), info.weight + 256); + assert_eq!(BlockWeight::::get().total(), info.weight + 256); assert!( CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(())) .is_ok() ); assert_eq!( - BlockWeight::get().total(), + BlockWeight::::get().total(), post_info.actual_weight.unwrap() + 256, ); }) @@ -616,14 +598,15 @@ mod tests { }; let len = 0_usize; - 
BlockWeight::mutate(|current_weight| { - current_weight.put(128, DispatchClass::Normal) + BlockWeight::::mutate(|current_weight| { + current_weight.set(0, DispatchClass::Mandatory); + current_weight.set(128, DispatchClass::Normal); }); let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); assert_eq!( - BlockWeight::get().total(), - info.weight + 128 + ::ExtrinsicBaseWeight::get(), + BlockWeight::::get().total(), + info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, ); assert!( @@ -631,8 +614,8 @@ mod tests { .is_ok() ); assert_eq!( - BlockWeight::get().total(), - info.weight + 128 + ::ExtrinsicBaseWeight::get(), + BlockWeight::::get().total(), + info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, ); }) } @@ -640,17 +623,81 @@ mod tests { #[test] fn zero_weight_extrinsic_still_has_base_weight() { new_test_ext().execute_with(|| { + let weights = block_weights(); let free = DispatchInfo { weight: 0, ..Default::default() }; let len = 0_usize; - // Initial weight from `BlockExecutionWeight` - assert_eq!(System::block_weight().total(), ::BlockExecutionWeight::get()); + // Initial weight from `weights.base_block` + assert_eq!( + System::block_weight().total(), + weights.base_block + ); let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len); assert!(r.is_ok()); assert_eq!( System::block_weight().total(), - ::ExtrinsicBaseWeight::get() + ::BlockExecutionWeight::get() + weights.get(DispatchClass::Normal).base_extrinsic + weights.base_block ); }) } + + #[test] + fn normal_and_mandatory_tracked_separately() { + new_test_ext().execute_with(|| { + // Max block is 1024 + // Max normal is 768 (75%) + // Max mandatory is unlimited + let max_normal = DispatchInfo { weight: 753, ..Default::default() }; + let mandatory = DispatchInfo { weight: 1019, class: DispatchClass::Mandatory, ..Default::default() }; + + let len = 0_usize; + + assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + assert_eq!(System::block_weight().total(), 768); + assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); + assert_eq!(block_weight_limit(), 1024); + assert_eq!(System::block_weight().total(), 1024 + 768); + assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); + }); + } + + #[test] + fn no_max_total_should_still_be_limited_by_max_block() { + // given + let maximum_weight = BlockWeights::builder() + .base_block(0) + .for_class(DispatchClass::non_mandatory(), |w| { + w.base_extrinsic = 0; + w.max_total = Some(20); + }) + .for_class(DispatchClass::Mandatory, |w| { + w.base_extrinsic = 0; + w.reserved = Some(5); + w.max_total = None; + }) + .build_or_panic(); + let all_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => 10, + DispatchClass::Operational => 10, + DispatchClass::Mandatory => 0, + }); + assert_eq!(maximum_weight.max_block, all_weight.total()); + + // fits into reserved + let mandatory1 = DispatchInfo { weight: 5, class: DispatchClass::Mandatory, ..Default::default() }; + // does not fit into reserved and the block is full. 
+ let mandatory2 = DispatchInfo { weight: 6, class: DispatchClass::Mandatory, ..Default::default() }; + + // when + let result1 = calculate_consumed_weight::<::Call>( + maximum_weight.clone(), all_weight.clone(), &mandatory1 + ); + let result2 = calculate_consumed_weight::<::Call>( + maximum_weight, all_weight, &mandatory2 + ); + + // then + assert!(result2.is_err()); + assert!(result1.is_ok()); + } } diff --git a/frame/system/src/extensions/mod.rs b/frame/system/src/extensions/mod.rs index ff61353e2d176..8b6c9b49e4d6b 100644 --- a/frame/system/src/extensions/mod.rs +++ b/frame/system/src/extensions/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 595b001ea6b01..e521a082a91ca 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,17 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # System Module +//! # System Pallet //! -//! The System module provides low-level access to core types and cross-cutting utilities. +//! The System pallet provides low-level access to core types and cross-cutting utilities. //! It acts as the base layer for other pallets to interact with the Substrate framework components. //! -//! - [`system::Trait`](./trait.Trait.html) +//! - [`Config`] //! //! ## Overview //! -//! The System module defines the core data types used in a Substrate runtime. -//! It also provides several utility functions (see [`Module`](./struct.Module.html)) for other FRAME pallets. +//! The System pallet defines the core data types used in a Substrate runtime. +//! It also provides several utility functions (see [`Pallet`]) for other FRAME pallets. //! //! In addition, it manages the storage items for extrinsics data, indexes, event records, and digest items, //! among other things that support the execution of the current block. @@ -37,15 +37,15 @@ //! //! ### Dispatchable Functions //! -//! The System module does not implement any dispatchable functions. +//! The System pallet does not implement any dispatchable functions. //! //! ### Public Functions //! -//! See the [`Module`](./struct.Module.html) struct for details of publicly available functions. +//! See the [`Pallet`] struct for details of publicly available functions. //! //! ### Signed Extensions //! -//! The System module defines the following extensions: +//! The System pallet defines the following extensions: //! //! - [`CheckWeight`]: Checks the weight and length of the block and ensure that it does not //! exceed the limits. @@ -61,34 +61,6 @@ //! //! Lookup the runtime aggregator file (e.g. `node/runtime`) to see the full list of signed //! extensions included in a chain. -//! -//! ## Usage -//! -//! ### Prerequisites -//! -//! Import the System module and derive your module's configuration trait from the system trait. -//! -//! ### Example - Get extrinsic count and parent hash for the current block -//! -//! ``` -//! use frame_support::{decl_module, dispatch}; -//! 
use frame_system::{self as system, ensure_signed}; -//! -//! pub trait Trait: system::Trait {} -//! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = 0] -//! pub fn system_module_example(origin) -> dispatch::DispatchResult { -//! let _sender = ensure_signed(origin)?; -//! let _extrinsic_count = >::extrinsic_count(); -//! let _parent_hash = >::parent_hash(); -//! Ok(()) -//! } -//! } -//! } -//! # fn main() { } -//! ``` #![cfg_attr(not(feature = "std"), no_std)] @@ -97,7 +69,6 @@ use serde::Serialize; use sp_std::prelude::*; #[cfg(any(feature = "std", test))] use sp_std::map; -use sp_std::convert::Infallible; use sp_std::marker::PhantomData; use sp_std::fmt::Debug; use sp_version::RuntimeVersion; @@ -106,40 +77,44 @@ use sp_runtime::{ traits::{ self, CheckEqual, AtLeast32Bit, Zero, Lookup, LookupError, SimpleBitOps, Hash, Member, MaybeDisplay, BadOrigin, - MaybeSerialize, MaybeSerializeDeserialize, MaybeMallocSizeOf, StaticLookup, One, Bounded, - Dispatchable, AtLeast32BitUnsigned + MaybeSerializeDeserialize, MaybeMallocSizeOf, StaticLookup, One, Bounded, + Dispatchable, AtLeast32BitUnsigned, Saturating, StoredMapError, }, offchain::storage_lock::BlockNumberProvider, }; use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; use frame_support::{ - decl_module, decl_event, decl_storage, decl_error, Parameter, ensure, debug, - storage, + Parameter, debug, storage, traits::{ - Contains, Get, PalletInfo, OnNewAccount, OnKilledAccount, IsDeadAccount, Happened, + Contains, Get, PalletInfo, OnNewAccount, OnKilledAccount, HandleLifetime, StoredMap, EnsureOrigin, OriginTrait, Filter, }, weights::{ Weight, RuntimeDbWeight, DispatchInfo, DispatchClass, - extract_actual_weight, + extract_actual_weight, PerDispatchClass, }, dispatch::DispatchResultWithPostInfo, }; use codec::{Encode, Decode, FullCodec, EncodeLike}; +#[cfg(feature = "std")] +use frame_support::traits::GenesisBuild; #[cfg(any(feature = "std", test))] use sp_io::TestExternalities; pub mod offchain; +pub mod limits; #[cfg(test)] pub(crate) mod mock; mod extensions; -mod weight; pub mod weights; #[cfg(test)] mod tests; +#[cfg(feature = "std")] +pub mod mocking; + pub use extensions::{ check_mortality::CheckMortality, check_genesis::CheckGenesis, check_nonce::CheckNonce, @@ -160,398 +135,157 @@ pub fn extrinsics_data_root(xts: Vec>) -> H::Output { H::ordered_trie_root(xts) } -pub trait Trait: 'static + Eq + Clone { - /// The basic call filter to use in Origin. All origins are built with this filter as base, - /// except Root. - type BaseCallFilter: Filter; - - /// The `Origin` type used by dispatchable calls. - type Origin: - Into, Self::Origin>> - + From> - + Clone - + OriginTrait; - - /// The aggregated `Call` type. - type Call: Dispatchable + Debug; - - /// Account index (aka nonce) type. This stores the number of previous transactions associated - /// with a sender account. - type Index: - Parameter + Member + MaybeSerialize + Debug + Default + MaybeDisplay + AtLeast32Bit - + Copy; - - /// The block number type used by the runtime. - type BlockNumber: - Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + - AtLeast32BitUnsigned + Default + Bounded + Copy + sp_std::hash::Hash + - sp_std::str::FromStr + MaybeMallocSizeOf; - - /// The output of the `Hashing` function. 
- type Hash: - Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + SimpleBitOps + Ord - + Default + Copy + CheckEqual + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + MaybeMallocSizeOf; - - /// The hashing system (algorithm) being used in the runtime (e.g. Blake2). - type Hashing: Hash; - - /// The user account identifier type for the runtime. - type AccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord - + Default; - - /// Converting trait to take a source type and convert to `AccountId`. - /// - /// Used to define the type and conversion mechanism for referencing accounts in transactions. - /// It's perfectly reasonable for this to be an identity conversion (with the source type being - /// `AccountId`), but other modules (e.g. Indices module) may provide more functional/efficient - /// alternatives. - type Lookup: StaticLookup; - - /// The block header. - type Header: Parameter + traits::Header< - Number = Self::BlockNumber, - Hash = Self::Hash, - >; - - /// The aggregated event type of the runtime. - type Event: Parameter + Member + From> + Debug; - - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount: Get; - - /// The maximum weight of a block. - type MaximumBlockWeight: Get; +/// An object to track the currently used extrinsic weight in a block. +pub type ConsumedWeight = PerDispatchClass; - /// The weight of runtime database operations the runtime can invoke. - type DbWeight: Get; +pub use pallet::*; - /// The base weight of executing a block, independent of the transactions in the block. - type BlockExecutionWeight: Get; +#[frame_support::pallet] +pub mod pallet { + use crate::{*, pallet_prelude::*, self as frame_system}; + use frame_support::pallet_prelude::*; - /// The base weight of an Extrinsic in the block, independent of the of extrinsic being executed. - type ExtrinsicBaseWeight: Get; + /// System configuration trait. Implemented by runtime. + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: 'static + Eq + Clone { + /// The basic call filter to use in Origin. All origins are built with this filter as base, + /// except Root. + type BaseCallFilter: Filter; - /// The maximal weight of a single Extrinsic. This should be set to at most - /// `MaximumBlockWeight - AverageOnInitializeWeight`. The limit only applies to extrinsics - /// containing `Normal` dispatch class calls. - type MaximumExtrinsicWeight: Get; + /// Block & extrinsics weights: base values and limits. + #[pallet::constant] + type BlockWeights: Get; - /// The maximum length of a block (in bytes). - type MaximumBlockLength: Get; - - /// The portion of the block that is available to normal transaction. The rest can only be used - /// by operational transactions. This can be applied to any resource limit managed by the system - /// module, including weight and length. - type AvailableBlockRatio: Get; - - /// Get the chain's current version. - type Version: Get; - - /// Provides information about the pallet setup in the runtime. - /// - /// Expects the `PalletInfo` type that is being generated by `construct_runtime!` in the - /// runtime. - /// - /// For tests it is okay to use `()` as type, however it will provide "useless" data. - type PalletInfo: PalletInfo; - - /// Data to be associated with an account (other than nonce/transaction counter, which this - /// module does regardless). 
- type AccountData: Member + FullCodec + Clone + Default; - - /// Handler for when a new account has just been created. - type OnNewAccount: OnNewAccount; - - /// A function that is invoked when an account has been determined to be dead. - /// - /// All resources should be cleaned up associated with the given account. - type OnKilledAccount: OnKilledAccount; - - type SystemWeightInfo: WeightInfo; -} - -pub type DigestOf = generic::Digest<::Hash>; -pub type DigestItemOf = generic::DigestItem<::Hash>; - -pub type Key = Vec; -pub type KeyValue = (Vec, Vec); - -/// A phase of a block's execution. -#[derive(Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] -pub enum Phase { - /// Applying an extrinsic. - ApplyExtrinsic(u32), - /// Finalizing the block. - Finalization, - /// Initializing the block. - Initialization, -} - -impl Default for Phase { - fn default() -> Self { - Self::Initialization - } -} - -/// Record of an event happening. -#[derive(Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] -pub struct EventRecord { - /// The phase of the block it happened in. - pub phase: Phase, - /// The event itself. - pub event: E, - /// The list of the topics this event has. - pub topics: Vec, -} - -/// Origin for the System module. -#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode)] -pub enum RawOrigin { - /// The system itself ordained this dispatch to happen: this is the highest privilege level. - Root, - /// It is signed by some public key and we provide the `AccountId`. - Signed(AccountId), - /// It is signed by nobody, can be either: - /// * included and agreed upon by the validators anyway, - /// * or unsigned transaction validated by a module. - None, -} - -impl From> for RawOrigin { - fn from(s: Option) -> RawOrigin { - match s { - Some(who) => RawOrigin::Signed(who), - None => RawOrigin::None, - } - } -} - -/// Exposed trait-generic origin type. -pub type Origin = RawOrigin<::AccountId>; - -// Create a Hash with 69 for each byte, -// only used to build genesis config. -#[cfg(feature = "std")] -fn hash69 + Default>() -> T { - let mut h = T::default(); - h.as_mut().iter_mut().for_each(|byte| *byte = 69); - h -} - -/// This type alias represents an index of an event. -/// -/// We use `u32` here because this index is used as index for `Events` -/// which can't contain more than `u32::max_value()` items. -type EventIndex = u32; - -/// Type used to encode the number of references an account has. -pub type RefCount = u32; - -/// Information of an account. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] -pub struct AccountInfo { - /// The number of transactions this account has sent. - pub nonce: Index, - /// The number of other modules that currently depend on this account's existence. The account - /// cannot be reaped until this is zero. - pub refcount: RefCount, - /// The additional data that belongs to this account. Used to store the balance(s) in a lot of - /// chains. - pub data: AccountData, -} - -/// Stores the `spec_version` and `spec_name` of when the last runtime upgrade -/// happened. -#[derive(sp_runtime::RuntimeDebug, Encode, Decode)] -#[cfg_attr(feature = "std", derive(PartialEq))] -pub struct LastRuntimeUpgradeInfo { - pub spec_version: codec::Compact, - pub spec_name: sp_runtime::RuntimeString, -} - -impl LastRuntimeUpgradeInfo { - /// Returns if the runtime was upgraded in comparison of `self` and `current`. 
- /// - /// Checks if either the `spec_version` increased or the `spec_name` changed. - pub fn was_upgraded(&self, current: &sp_version::RuntimeVersion) -> bool { - current.spec_version > self.spec_version.0 || current.spec_name != self.spec_name - } -} - -impl From for LastRuntimeUpgradeInfo { - fn from(version: sp_version::RuntimeVersion) -> Self { - Self { - spec_version: version.spec_version.into(), - spec_name: version.spec_name, - } - } -} - -decl_storage! { - trait Store for Module as System { - /// The full account information for a particular account ID. - pub Account get(fn account): - map hasher(blake2_128_concat) T::AccountId => AccountInfo; - - /// Total extrinsics count for the current block. - ExtrinsicCount: Option; - - /// The current weight for the block. - BlockWeight get(fn block_weight): weight::ExtrinsicsWeight; - - /// Total length (in bytes) for all extrinsics put together, for the current block. - AllExtrinsicsLen: Option; - - /// Map of block numbers to block hashes. - pub BlockHash get(fn block_hash) build(|_| vec![(T::BlockNumber::zero(), hash69())]): - map hasher(twox_64_concat) T::BlockNumber => T::Hash; - - /// Extrinsics data for the current block (maps an extrinsic's index to its data). - ExtrinsicData get(fn extrinsic_data): map hasher(twox_64_concat) u32 => Vec; - - /// The current block number being processed. Set by `execute_block`. - Number get(fn block_number): T::BlockNumber; - - /// Hash of the previous block. - ParentHash get(fn parent_hash) build(|_| hash69()): T::Hash; + /// The maximum length of a block (in bytes). + #[pallet::constant] + type BlockLength: Get; + + /// The `Origin` type used by dispatchable calls. + type Origin: + Into, Self::Origin>> + + From> + + Clone + + OriginTrait; + + /// The aggregated `Call` type. + type Call: Dispatchable + Debug; + + /// Account index (aka nonce) type. This stores the number of previous transactions associated + /// with a sender account. + type Index: + Parameter + Member + MaybeSerializeDeserialize + Debug + Default + MaybeDisplay + AtLeast32Bit + + Copy; + + /// The block number type used by the runtime. + type BlockNumber: + Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + + AtLeast32BitUnsigned + Default + Bounded + Copy + sp_std::hash::Hash + + sp_std::str::FromStr + MaybeMallocSizeOf; + + /// The output of the `Hashing` function. + type Hash: + Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + SimpleBitOps + Ord + + Default + Copy + CheckEqual + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + MaybeMallocSizeOf; + + /// The hashing system (algorithm) being used in the runtime (e.g. Blake2). + type Hashing: Hash; + + /// The user account identifier type for the runtime. + type AccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + + Default; + + /// Converting trait to take a source type and convert to `AccountId`. + /// + /// Used to define the type and conversion mechanism for referencing accounts in transactions. + /// It's perfectly reasonable for this to be an identity conversion (with the source type being + /// `AccountId`), but other pallets (e.g. Indices pallet) may provide more functional/efficient + /// alternatives. + type Lookup: StaticLookup; - /// Extrinsics root of the current block, also part of the block header. - ExtrinsicsRoot get(fn extrinsics_root): T::Hash; + /// The block header. 
+ type Header: Parameter + traits::Header< + Number=Self::BlockNumber, + Hash=Self::Hash, + >; - /// Digest of the current block, also part of the block header. - Digest get(fn digest): DigestOf; + /// The aggregated event type of the runtime. + type Event: Parameter + Member + From> + Debug + IsType<::Event>; - /// Events deposited for the current block. - Events get(fn events): Vec>; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + #[pallet::constant] + type BlockHashCount: Get; - /// The number of events in the `Events` list. - EventCount get(fn event_count): EventIndex; + /// The weight of runtime database operations the runtime can invoke. + #[pallet::constant] + type DbWeight: Get; - // TODO: https://github.com/paritytech/substrate/issues/2553 - // Possibly, we can improve it by using something like: - // `Option<(BlockNumber, Vec)>`, however in this case we won't be able to use - // `EventTopics::append`. + /// Get the chain's current version. + #[pallet::constant] + type Version: Get; - /// Mapping between a topic (represented by T::Hash) and a vector of indexes - /// of events in the `>` list. + /// Provides information about the pallet setup in the runtime. /// - /// All topic vectors have deterministic storage locations depending on the topic. This - /// allows light-clients to leverage the changes trie storage tracking mechanism and - /// in case of changes fetch the list of events of interest. + /// Expects the `PalletInfo` type that is being generated by `construct_runtime!` in the + /// runtime. /// - /// The value has the type `(T::BlockNumber, EventIndex)` because if we used only just - /// the `EventIndex` then in case if the topic has the same contents on the next block - /// no notification will be triggered thus the event might be lost. - EventTopics get(fn event_topics): map hasher(blake2_128_concat) T::Hash => Vec<(T::BlockNumber, EventIndex)>; - - /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. - pub LastRuntimeUpgrade build(|_| Some(LastRuntimeUpgradeInfo::from(T::Version::get()))): Option; + /// For tests it is okay to use `()` as type, however it will provide "useless" data. + type PalletInfo: PalletInfo; - /// True if we have upgraded so that `type RefCount` is `u32`. False (default) if not. - UpgradedToU32RefCount build(|_| true): bool; + /// Data to be associated with an account (other than nonce/transaction counter, which this + /// pallet does regardless). + type AccountData: Member + FullCodec + Clone + Default; - /// The execution phase of the block. - ExecutionPhase: Option; - } - add_extra_genesis { - config(changes_trie_config): Option; - #[serde(with = "sp_core::bytes")] - config(code): Vec; + /// Handler for when a new account has just been created. + type OnNewAccount: OnNewAccount; - build(|config: &GenesisConfig| { - use codec::Encode; - - sp_io::storage::set(well_known_keys::CODE, &config.code); - sp_io::storage::set(well_known_keys::EXTRINSIC_INDEX, &0u32.encode()); - - if let Some(ref changes_trie_config) = config.changes_trie_config { - sp_io::storage::set( - well_known_keys::CHANGES_TRIE_CONFIG, - &changes_trie_config.encode(), - ); - } - }); - } -} + /// A function that is invoked when an account has been determined to be dead. + /// + /// All resources should be cleaned up associated with the given account. + type OnKilledAccount: OnKilledAccount; -decl_event!( - /// Event for the System module. 
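// Hedged sketch (not part of this diff): the `add_extra_genesis` block removed above is
// replaced by the `#[pallet::genesis_config]` / `#[pallet::genesis_build]` pair shown
// further down. Building genesis storage for tests keeps working through the direct
// `build_storage` helper; `T` is an assumed runtime implementing `frame_system::Config`
// and the empty `code` blob is a placeholder for the real runtime WASM.
#[cfg(feature = "std")]
fn genesis_storage_sketch<T: frame_system::Config>() -> Result<sp_runtime::Storage, String> {
    frame_system::GenesisConfig {
        changes_trie_config: None,
        code: vec![],
    }
    .build_storage::<T>()
}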
- pub enum Event where AccountId = ::AccountId { - /// An extrinsic completed successfully. \[info\] - ExtrinsicSuccess(DispatchInfo), - /// An extrinsic failed. \[error, info\] - ExtrinsicFailed(DispatchError, DispatchInfo), - /// `:code` was updated. - CodeUpdated, - /// A new \[account\] was created. - NewAccount(AccountId), - /// An \[account\] was reaped. - KilledAccount(AccountId), - } -); + type SystemWeightInfo: WeightInfo; -decl_error! { - /// Error for the System module - pub enum Error for Module { - /// The name of specification does not match between the current runtime - /// and the new runtime. - InvalidSpecName, - /// The specification version is not allowed to decrease between the current runtime - /// and the new runtime. - SpecVersionNeedsToIncrease, - /// Failed to extract the runtime version from the new runtime. + /// The designated SS85 prefix of this chain. /// - /// Either calling `Core_version` or decoding `RuntimeVersion` failed. - FailedToExtractRuntimeVersion, - /// Suicide called when the account has non-default composite data. - NonDefaultComposite, - /// There is a non-zero reference count preventing the account from being purged. - NonZeroRefCount, + /// This replaces the "ss58Format" property declared in the chain spec. Reason is + /// that the runtime should know about the prefix in order to make use of it as + /// an identifier of the chain. + #[pallet::constant] + type SS58Prefix: Get; } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { - type Error = Error; - - /// The maximum number of blocks to allow in mortal eras. - const BlockHashCount: T::BlockNumber = T::BlockHashCount::get(); - - /// The maximum weight of a block. - const MaximumBlockWeight: Weight = T::MaximumBlockWeight::get(); - - /// The weight of runtime database operations the runtime can invoke. - const DbWeight: RuntimeDbWeight = T::DbWeight::get(); - /// The base weight of executing a block, independent of the transactions in the block. - const BlockExecutionWeight: Weight = T::BlockExecutionWeight::get(); - - /// The base weight of an Extrinsic in the block, independent of the of extrinsic being executed. - const ExtrinsicBaseWeight: Weight = T::ExtrinsicBaseWeight::get(); - - /// The maximum length of a block (in bytes). - const MaximumBlockLength: u32 = T::MaximumBlockLength::get(); + #[pallet::pallet] + #[pallet::generate_store(pub (super) trait Store)] + pub struct Pallet(_); + #[pallet::hooks] + impl Hooks> for Pallet { fn on_runtime_upgrade() -> frame_support::weights::Weight { - if !UpgradedToU32RefCount::get() { - Account::::translate::<(T::Index, u8, T::AccountData), _>(|_key, (nonce, rc, data)| - Some(AccountInfo { nonce, refcount: rc as RefCount, data }) - ); - UpgradedToU32RefCount::put(true); - T::MaximumBlockWeight::get() + if !UpgradedToDualRefCount::::get() { + UpgradedToDualRefCount::::put(true); + migrations::migrate_to_dual_ref_count::() } else { 0 } } + fn integrity_test() { + T::BlockWeights::get() + .validate() + .expect("The weights are invalid."); + } + } + + #[pallet::call] + impl Pallet { /// A dispatch that will fill the block weight up to the given ratio. // TODO: This should only be available for testing, rather than in general usage, but - // that's not possible at present (since it's within the decl_module macro). - #[weight = *_ratio * T::MaximumBlockWeight::get()] - fn fill_block(origin, _ratio: Perbill) { + // that's not possible at present (since it's within the pallet macro). 
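// Hedged sketch (not part of this diff) of how a runtime supplies the reworked
// `#[pallet::constant]` items: the old `MaximumBlockWeight`/`MaximumBlockLength`/
// `AvailableBlockRatio` trio becomes a `limits::BlockLength` (and, analogously, a
// `limits::BlockWeights`, see the builder sketch near `limits.rs` below), plus the new
// `SS58Prefix`. All concrete numbers are placeholders, and a `u64` block number is assumed.
use frame_support::parameter_types;
use frame_system::limits::BlockLength;
use sp_runtime::Perbill;

parameter_types! {
    pub const BlockHashCount: u64 = 250;
    pub const SS58Prefix: u8 = 42;
    // 5 MiB blocks, with 75% of the byte budget available to `Normal` extrinsics.
    pub RuntimeBlockLength: BlockLength =
        BlockLength::max_with_normal_ratio(5 * 1024 * 1024, Perbill::from_percent(75));
}
// These would then be wired up in `impl frame_system::Config for Runtime`, e.g.
// `type BlockHashCount = BlockHashCount;`, `type BlockLength = RuntimeBlockLength;`,
// `type SS58Prefix = SS58Prefix;`, and `type DbWeight` pointed at a constant such as
// `frame_support::weights::constants::RocksDbWeight`.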
+ #[pallet::weight(*_ratio * T::BlockWeights::get().max_block)] + pub(crate) fn fill_block(origin: OriginFor, _ratio: Perbill) -> DispatchResultWithPostInfo { ensure_root(origin)?; + Ok(().into()) } /// Make some on-chain remark. @@ -561,9 +295,10 @@ decl_module! { /// - Base Weight: 0.665 µs, independent of remark length. /// - No DB operations. /// # - #[weight = T::SystemWeightInfo::remark(_remark.len() as u32)] - fn remark(origin, _remark: Vec) { + #[pallet::weight(T::SystemWeightInfo::remark(_remark.len() as u32))] + pub(crate) fn remark(origin: OriginFor, _remark: Vec) -> DispatchResultWithPostInfo { ensure_signed(origin)?; + Ok(().into()) } /// Set the number of pages in the WebAssembly environment's heap. @@ -574,10 +309,11 @@ decl_module! { /// - Base Weight: 1.405 µs /// - 1 write to HEAP_PAGES /// # - #[weight = (T::SystemWeightInfo::set_heap_pages(), DispatchClass::Operational)] - fn set_heap_pages(origin, pages: u64) { + #[pallet::weight((T::SystemWeightInfo::set_heap_pages(), DispatchClass::Operational))] + pub(crate) fn set_heap_pages(origin: OriginFor, pages: u64) -> DispatchResultWithPostInfo { ensure_root(origin)?; storage::unhashed::put_raw(well_known_keys::HEAP_PAGES, &pages.encode()); + Ok(().into()) } /// Set the new runtime code. @@ -590,13 +326,14 @@ decl_module! { /// The weight of this function is dependent on the runtime, but generally this is very expensive. /// We will treat this as a full block. /// # - #[weight = (T::MaximumBlockWeight::get(), DispatchClass::Operational)] - pub fn set_code(origin, code: Vec) { + #[pallet::weight((T::BlockWeights::get().max_block, DispatchClass::Operational))] + pub fn set_code(origin: OriginFor, code: Vec) -> DispatchResultWithPostInfo { ensure_root(origin)?; Self::can_set_code(&code)?; storage::unhashed::put_raw(well_known_keys::CODE, &code); - Self::deposit_event(RawEvent::CodeUpdated); + Self::deposit_event(Event::CodeUpdated); + Ok(().into()) } /// Set the new runtime code without doing any checks of the given `code`. @@ -607,11 +344,15 @@ decl_module! { /// - 1 event. /// The weight of this function is dependent on the runtime. We will treat this as a full block. /// # - #[weight = (T::MaximumBlockWeight::get(), DispatchClass::Operational)] - pub fn set_code_without_checks(origin, code: Vec) { + #[pallet::weight((T::BlockWeights::get().max_block, DispatchClass::Operational))] + pub fn set_code_without_checks( + origin: OriginFor, + code: Vec, + ) -> DispatchResultWithPostInfo { ensure_root(origin)?; storage::unhashed::put_raw(well_known_keys::CODE, &code); - Self::deposit_event(RawEvent::CodeUpdated); + Self::deposit_event(Event::CodeUpdated); + Ok(().into()) } /// Set the new changes trie configuration. @@ -624,8 +365,11 @@ decl_module! { /// - DB Weight: /// - Writes: Changes Trie, System Digest /// # - #[weight = (T::SystemWeightInfo::set_changes_trie_config(), DispatchClass::Operational)] - pub fn set_changes_trie_config(origin, changes_trie_config: Option) { + #[pallet::weight((T::SystemWeightInfo::set_changes_trie_config(), DispatchClass::Operational))] + pub fn set_changes_trie_config( + origin: OriginFor, + changes_trie_config: Option, + ) -> DispatchResultWithPostInfo { ensure_root(origin)?; match changes_trie_config.clone() { Some(changes_trie_config) => storage::unhashed::put_raw( @@ -639,6 +383,7 @@ decl_module! { generic::ChangesTrieSignal::NewConfiguration(changes_trie_config), ); Self::deposit_log(log.into()); + Ok(().into()) } /// Set some items of storage. @@ -649,15 +394,16 @@ decl_module! 
{ /// - Base Weight: 0.568 * i µs /// - Writes: Number of items /// # - #[weight = ( + #[pallet::weight(( T::SystemWeightInfo::set_storage(items.len() as u32), DispatchClass::Operational, - )] - fn set_storage(origin, items: Vec) { + ))] + pub(crate) fn set_storage(origin: OriginFor, items: Vec) -> DispatchResultWithPostInfo { ensure_root(origin)?; for i in &items { storage::unhashed::put_raw(&i.0, &i.1); } + Ok(().into()) } /// Kill some items from storage. @@ -668,15 +414,16 @@ decl_module! { /// - Base Weight: .378 * i µs /// - Writes: Number of items /// # - #[weight = ( + #[pallet::weight(( T::SystemWeightInfo::kill_storage(keys.len() as u32), DispatchClass::Operational, - )] - fn kill_storage(origin, keys: Vec) { + ))] + pub(crate) fn kill_storage(origin: OriginFor, keys: Vec) -> DispatchResultWithPostInfo { ensure_root(origin)?; for key in &keys { storage::unhashed::kill(&key); } + Ok(().into()) } /// Kill all storage items with a key that starts with the given prefix. @@ -690,32 +437,347 @@ decl_module! { /// - Base Weight: 0.834 * P µs /// - Writes: Number of subkeys + 1 /// # - #[weight = ( + #[pallet::weight(( T::SystemWeightInfo::kill_prefix(_subkeys.saturating_add(1)), DispatchClass::Operational, - )] - fn kill_prefix(origin, prefix: Key, _subkeys: u32) { + ))] + pub(crate) fn kill_prefix( + origin: OriginFor, + prefix: Key, + _subkeys: u32, + ) -> DispatchResultWithPostInfo { ensure_root(origin)?; storage::unhashed::kill_prefix(&prefix); + Ok(().into()) } + } + + /// Event for the System pallet. + #[pallet::event] + #[pallet::metadata(T::AccountId = "AccountId")] + pub enum Event { + /// An extrinsic completed successfully. \[info\] + ExtrinsicSuccess(DispatchInfo), + /// An extrinsic failed. \[error, info\] + ExtrinsicFailed(DispatchError, DispatchInfo), + /// `:code` was updated. + CodeUpdated, + /// A new \[account\] was created. + NewAccount(T::AccountId), + /// An \[account\] was reaped. + KilledAccount(T::AccountId), + } + + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; - /// Kill the sending account, assuming there are no references outstanding and the composite - /// data is equal to its default value. + /// Error for the System pallet + #[pallet::error] + pub enum Error { + /// The name of specification does not match between the current runtime + /// and the new runtime. + InvalidSpecName, + /// The specification version is not allowed to decrease between the current runtime + /// and the new runtime. + SpecVersionNeedsToIncrease, + /// Failed to extract the runtime version from the new runtime. /// - /// # - /// - `O(1)` - /// - 1 storage read and deletion. - /// -------------------- - /// Base Weight: 8.626 µs - /// No DB Read or Write operations because caller is already in overlay - /// # - #[weight = (T::SystemWeightInfo::suicide(), DispatchClass::Operational)] - pub fn suicide(origin) { - let who = ensure_signed(origin)?; - let account = Account::::get(&who); - ensure!(account.refcount == 0, Error::::NonZeroRefCount); - ensure!(account.data == T::AccountData::default(), Error::::NonDefaultComposite); - Self::kill_account(&who); + /// Either calling `Core_version` or decoding `RuntimeVersion` failed. + FailedToExtractRuntimeVersion, + /// Suicide called when the account has non-default composite data. + NonDefaultComposite, + /// There is a non-zero reference count preventing the account from being purged. + NonZeroRefCount, + } + + /// Exposed trait-generic origin type. 
+ #[pallet::origin] + pub type Origin = RawOrigin<::AccountId>; + + /// The full account information for a particular account ID. + #[pallet::storage] + #[pallet::getter(fn account)] + pub type Account = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + AccountInfo, + ValueQuery, + >; + + /// Total extrinsics count for the current block. + #[pallet::storage] + pub(super) type ExtrinsicCount = StorageValue<_, u32>; + + /// The current weight for the block. + #[pallet::storage] + #[pallet::getter(fn block_weight)] + pub(super) type BlockWeight = StorageValue<_, ConsumedWeight, ValueQuery>; + + /// Total length (in bytes) for all extrinsics put together, for the current block. + #[pallet::storage] + pub(super) type AllExtrinsicsLen = StorageValue<_, u32>; + + /// Map of block numbers to block hashes. + #[pallet::storage] + #[pallet::getter(fn block_hash)] + pub type BlockHash = + StorageMap<_, Twox64Concat, T::BlockNumber, T::Hash, ValueQuery>; + + /// Extrinsics data for the current block (maps an extrinsic's index to its data). + #[pallet::storage] + #[pallet::getter(fn extrinsic_data)] + pub(super) type ExtrinsicData = + StorageMap<_, Twox64Concat, u32, Vec, ValueQuery>; + + /// The current block number being processed. Set by `execute_block`. + #[pallet::storage] + #[pallet::getter(fn block_number)] + pub(super) type Number = StorageValue<_, T::BlockNumber, ValueQuery>; + + /// Hash of the previous block. + #[pallet::storage] + #[pallet::getter(fn parent_hash)] + pub(super) type ParentHash = StorageValue<_, T::Hash, ValueQuery>; + + /// Digest of the current block, also part of the block header. + #[pallet::storage] + #[pallet::getter(fn digest)] + pub(super) type Digest = StorageValue<_, DigestOf, ValueQuery>; + + /// Events deposited for the current block. + #[pallet::storage] + #[pallet::getter(fn events)] + pub(super) type Events = + StorageValue<_, Vec>, ValueQuery>; + + /// The number of events in the `Events` list. + #[pallet::storage] + #[pallet::getter(fn event_count)] + pub(super) type EventCount = StorageValue<_, EventIndex, ValueQuery>; + + /// Mapping between a topic (represented by T::Hash) and a vector of indexes + /// of events in the `>` list. + /// + /// All topic vectors have deterministic storage locations depending on the topic. This + /// allows light-clients to leverage the changes trie storage tracking mechanism and + /// in case of changes fetch the list of events of interest. + /// + /// The value has the type `(T::BlockNumber, EventIndex)` because if we used only just + /// the `EventIndex` then in case if the topic has the same contents on the next block + /// no notification will be triggered thus the event might be lost. + #[pallet::storage] + #[pallet::getter(fn event_topics)] + pub(super) type EventTopics = + StorageMap<_, Blake2_128Concat, T::Hash, Vec<(T::BlockNumber, EventIndex)>, ValueQuery>; + + /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. + #[pallet::storage] + pub type LastRuntimeUpgrade = StorageValue<_, LastRuntimeUpgradeInfo>; + + /// True if we have upgraded so that `type RefCount` is `u32`. False (default) if not. + #[pallet::storage] + pub(super) type UpgradedToU32RefCount = StorageValue<_, bool, ValueQuery>; + + /// True if we have upgraded so that AccountInfo contains two types of `RefCount`. False + /// (default) if not. + #[pallet::storage] + pub(super) type UpgradedToDualRefCount = StorageValue<_, bool, ValueQuery>; + + /// The execution phase of the block. 
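// Small hypothetical sketch: the storage items converted to `#[pallet::storage]` above
// keep their generated getters, so other pallets read them exactly as before. `T` and
// the function name are assumed for illustration only.
fn current_context<T: frame_system::Config>() -> (T::BlockNumber, T::Hash) {
    // Getter generated by `#[pallet::getter(fn block_number)]`.
    let number = frame_system::Pallet::<T>::block_number();
    // Getter generated by `#[pallet::getter(fn parent_hash)]`.
    let parent = frame_system::Pallet::<T>::parent_hash();
    (number, parent)
}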
+ #[pallet::storage] + pub(super) type ExecutionPhase = StorageValue<_, Phase>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub changes_trie_config: Option, + #[serde(with = "sp_core::bytes")] + pub code: Vec, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + changes_trie_config: Default::default(), + code: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + >::insert::<_, T::Hash>(T::BlockNumber::zero(), hash69()); + >::put::(hash69()); + >::put(LastRuntimeUpgradeInfo::from(T::Version::get())); + >::put(true); + >::put(true); + + sp_io::storage::set(well_known_keys::CODE, &self.code); + sp_io::storage::set(well_known_keys::EXTRINSIC_INDEX, &0u32.encode()); + if let Some(ref changes_trie_config) = self.changes_trie_config { + sp_io::storage::set(well_known_keys::CHANGES_TRIE_CONFIG, &changes_trie_config.encode()); + } + } + } +} + +mod migrations { + use super::*; + + #[allow(dead_code)] + pub fn migrate_all() -> frame_support::weights::Weight { + Account::::translate::<(T::Index, u8, T::AccountData), _>(|_key, (nonce, rc, data)| + Some(AccountInfo { nonce, consumers: rc as RefCount, providers: 1, data }) + ); + T::BlockWeights::get().max_block + } + + pub fn migrate_to_dual_ref_count() -> frame_support::weights::Weight { + Account::::translate::<(T::Index, RefCount, T::AccountData), _>(|_key, (nonce, rc, data)| + Some(AccountInfo { nonce, consumers: rc as RefCount, providers: 1, data }) + ); + T::BlockWeights::get().max_block + } +} + +#[cfg(feature = "std")] +impl GenesisConfig { + /// Direct implementation of `GenesisBuild::build_storage`. + /// + /// Kept in order not to break dependency. + pub fn build_storage(&self) -> Result { + >::build_storage(self) + } + + /// Direct implementation of `GenesisBuild::assimilate_storage`. + /// + /// Kept in order not to break dependency. + pub fn assimilate_storage( + &self, + storage: &mut sp_runtime::Storage + ) -> Result<(), String> { + >::assimilate_storage(self, storage) + } +} + +pub type DigestOf = generic::Digest<::Hash>; +pub type DigestItemOf = generic::DigestItem<::Hash>; + +pub type Key = Vec; +pub type KeyValue = (Vec, Vec); + +/// A phase of a block's execution. +#[derive(Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] +pub enum Phase { + /// Applying an extrinsic. + ApplyExtrinsic(u32), + /// Finalizing the block. + Finalization, + /// Initializing the block. + Initialization, +} + +impl Default for Phase { + fn default() -> Self { + Self::Initialization + } +} + +/// Record of an event happening. +#[derive(Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] +pub struct EventRecord { + /// The phase of the block it happened in. + pub phase: Phase, + /// The event itself. + pub event: E, + /// The list of the topics this event has. + pub topics: Vec, +} + +/// Origin for the System pallet. +#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode)] +pub enum RawOrigin { + /// The system itself ordained this dispatch to happen: this is the highest privilege level. + Root, + /// It is signed by some public key and we provide the `AccountId`. + Signed(AccountId), + /// It is signed by nobody, can be either: + /// * included and agreed upon by the validators anyway, + /// * or unsigned transaction validated by a pallet. 
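// Illustrative sketch (not part of this diff) of the per-entry transformation that
// `migrate_to_dual_ref_count` performs via `translate`: the old single `refcount`
// becomes `consumers`, and every pre-existing account is grandfathered in with one
// `provider` reference so it keeps satisfying the new existence rules.
// `OldAccountInfo`/`NewAccountInfo` are stand-in structs for exposition only.
struct OldAccountInfo<Index, AccountData> {
    nonce: Index,
    refcount: u32,
    data: AccountData,
}

struct NewAccountInfo<Index, AccountData> {
    nonce: Index,
    consumers: u32,
    providers: u32,
    data: AccountData,
}

fn upgrade_account<Index, AccountData>(
    old: OldAccountInfo<Index, AccountData>,
) -> NewAccountInfo<Index, AccountData> {
    NewAccountInfo {
        nonce: old.nonce,
        consumers: old.refcount, // the old counter tracked consumers of the account
        providers: 1,            // existing accounts get exactly one provider reference
        data: old.data,
    }
}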
+ None, +} + +impl From> for RawOrigin { + fn from(s: Option) -> RawOrigin { + match s { + Some(who) => RawOrigin::Signed(who), + None => RawOrigin::None, + } + } +} + +// Create a Hash with 69 for each byte, +// only used to build genesis config. +#[cfg(feature = "std")] +fn hash69 + Default>() -> T { + let mut h = T::default(); + h.as_mut().iter_mut().for_each(|byte| *byte = 69); + h +} + +/// This type alias represents an index of an event. +/// +/// We use `u32` here because this index is used as index for `Events` +/// which can't contain more than `u32::max_value()` items. +type EventIndex = u32; + +/// Type used to encode the number of references an account has. +pub type RefCount = u32; + +/// Information of an account. +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] +pub struct AccountInfo { + /// The number of transactions this account has sent. + pub nonce: Index, + /// The number of other modules that currently depend on this account's existence. The account + /// cannot be reaped until this is zero. + pub consumers: RefCount, + /// The number of other modules that allow this account to exist. The account may not be reaped + /// until this is zero. + pub providers: RefCount, + /// The additional data that belongs to this account. Used to store the balance(s) in a lot of + /// chains. + pub data: AccountData, +} + +/// Stores the `spec_version` and `spec_name` of when the last runtime upgrade +/// happened. +#[derive(sp_runtime::RuntimeDebug, Encode, Decode)] +#[cfg_attr(feature = "std", derive(PartialEq))] +pub struct LastRuntimeUpgradeInfo { + pub spec_version: codec::Compact, + pub spec_name: sp_runtime::RuntimeString, +} + +impl LastRuntimeUpgradeInfo { + /// Returns if the runtime was upgraded in comparison of `self` and `current`. + /// + /// Checks if either the `spec_version` increased or the `spec_name` changed. + pub fn was_upgraded(&self, current: &sp_version::RuntimeVersion) -> bool { + current.spec_version > self.spec_version.0 || current.spec_name != self.spec_name + } +} + +impl From for LastRuntimeUpgradeInfo { + fn from(version: sp_version::RuntimeVersion) -> Self { + Self { + spec_version: version.spec_version.into(), + spec_name: version.spec_name, } } } @@ -892,36 +954,162 @@ impl Default for InitKind { } /// Reference status; can be either referenced or unreferenced. +#[derive(RuntimeDebug)] pub enum RefStatus { Referenced, Unreferenced, } -impl Module { - /// Deposits an event into this block's event record. - pub fn deposit_event(event: impl Into) { - Self::deposit_event_indexed(&[], event.into()); +/// Some resultant status relevant to incrementing a provider reference. +#[derive(RuntimeDebug)] +pub enum IncRefStatus { + /// Account was created. + Created, + /// Account already existed. + Existed, +} + +/// Some resultant status relevant to decrementing a provider reference. +#[derive(RuntimeDebug)] +pub enum DecRefStatus { + /// Account was destroyed. + Reaped, + /// Account still exists. + Exists, +} + +/// Some resultant status relevant to decrementing a provider reference. +#[derive(RuntimeDebug)] +pub enum DecRefError { + /// Account cannot have the last provider reference removed while there is a consumer. + ConsumerRemaining, +} + +/// Some resultant status relevant to incrementing a provider reference. +#[derive(RuntimeDebug)] +pub enum IncRefError { + /// Account cannot introduce a consumer while there are no providers. 
+ NoProviders, +} + +impl Module { + pub fn account_exists(who: &T::AccountId) -> bool { + Account::::contains_key(who) } /// Increment the reference counter on an account. + #[deprecated = "Use `inc_consumers` instead"] pub fn inc_ref(who: &T::AccountId) { - Account::::mutate(who, |a| a.refcount = a.refcount.saturating_add(1)); + let _ = Self::inc_consumers(who); } /// Decrement the reference counter on an account. This *MUST* only be done once for every time - /// you called `inc_ref` on `who`. + /// you called `inc_consumers` on `who`. + #[deprecated = "Use `dec_consumers` instead"] pub fn dec_ref(who: &T::AccountId) { - Account::::mutate(who, |a| a.refcount = a.refcount.saturating_sub(1)); + let _ = Self::dec_consumers(who); } /// The number of outstanding references for the account `who`. + #[deprecated = "Use `consumers` instead"] pub fn refs(who: &T::AccountId) -> RefCount { - Account::::get(who).refcount + Self::consumers(who) } /// True if the account has no outstanding references. + #[deprecated = "Use `!is_provider_required` instead"] pub fn allow_death(who: &T::AccountId) -> bool { - Account::::get(who).refcount == 0 + !Self::is_provider_required(who) + } + + /// Increment the reference counter on an account. + /// + /// The account `who`'s `providers` must be non-zero or this will return an error. + pub fn inc_providers(who: &T::AccountId) -> IncRefStatus { + Account::::mutate(who, |a| if a.providers == 0 { + // Account is being created. + a.providers = 1; + Self::on_created_account(who.clone(), a); + IncRefStatus::Created + } else { + a.providers = a.providers.saturating_add(1); + IncRefStatus::Existed + }) + } + + /// Decrement the reference counter on an account. This *MUST* only be done once for every time + /// you called `inc_consumers` on `who`. + pub fn dec_providers(who: &T::AccountId) -> Result { + Account::::try_mutate_exists(who, |maybe_account| { + if let Some(mut account) = maybe_account.take() { + match (account.providers, account.consumers) { + (0, _) => { + // Logic error - cannot decrement beyond zero and no item should + // exist with zero providers. + debug::print!("Logic error: Unexpected underflow in reducing provider"); + Ok(DecRefStatus::Reaped) + }, + (1, 0) => { + Module::::on_killed_account(who.clone()); + Ok(DecRefStatus::Reaped) + } + (1, _) => { + // Cannot remove last provider if there are consumers. + Err(DecRefError::ConsumerRemaining) + } + (x, _) => { + account.providers = x - 1; + *maybe_account = Some(account); + Ok(DecRefStatus::Exists) + } + } + } else { + debug::print!("Logic error: Account already dead when reducing provider"); + Ok(DecRefStatus::Reaped) + } + }) + } + + /// The number of outstanding references for the account `who`. + pub fn providers(who: &T::AccountId) -> RefCount { + Account::::get(who).providers + } + + /// Increment the reference counter on an account. + /// + /// The account `who`'s `providers` must be non-zero or this will return an error. + pub fn inc_consumers(who: &T::AccountId) -> Result<(), IncRefError> { + Account::::try_mutate(who, |a| if a.providers > 0 { + a.consumers = a.consumers.saturating_add(1); + Ok(()) + } else { + Err(IncRefError::NoProviders) + }) + } + + /// Decrement the reference counter on an account. This *MUST* only be done once for every time + /// you called `inc_consumers` on `who`. 
+ pub fn dec_consumers(who: &T::AccountId) { + Account::::mutate(who, |a| if a.consumers > 0 { + a.consumers -= 1; + } else { + debug::print!("Logic error: Unexpected underflow in reducing consumer"); + }) + } + + /// The number of outstanding references for the account `who`. + pub fn consumers(who: &T::AccountId) -> RefCount { + Account::::get(who).consumers + } + + /// True if the account has some outstanding references. + pub fn is_provider_required(who: &T::AccountId) -> bool { + Account::::get(who).consumers != 0 + } + + /// Deposits an event into this block's event record. + pub fn deposit_event(event: impl Into) { + Self::deposit_event_indexed(&[], event.into()); } /// Deposits an event into this block's event record adding this event @@ -934,7 +1122,7 @@ impl Module { // Don't populate events on genesis. if block_number.is_zero() { return } - let phase = ExecutionPhase::get().unwrap_or_default(); + let phase = ExecutionPhase::::get().unwrap_or_default(); let event = EventRecord { phase, event, @@ -943,14 +1131,14 @@ impl Module { // Index of the to be added event. let event_idx = { - let old_event_count = EventCount::get(); + let old_event_count = EventCount::::get(); let new_event_count = match old_event_count.checked_add(1) { // We've reached the maximum number of events at this block, just // don't do anything and leave the event_count unaltered. None => return, Some(nc) => nc, }; - EventCount::put(new_event_count); + EventCount::::put(new_event_count); old_event_count }; @@ -968,17 +1156,17 @@ impl Module { /// Gets extrinsics count. pub fn extrinsic_count() -> u32 { - ExtrinsicCount::get().unwrap_or_default() + ExtrinsicCount::::get().unwrap_or_default() } pub fn all_extrinsics_len() -> u32 { - AllExtrinsicsLen::get().unwrap_or_default() + AllExtrinsicsLen::::get().unwrap_or_default() } - /// Inform the system module of some additional weight that should be accounted for, in the + /// Inform the system pallet of some additional weight that should be accounted for, in the /// current block. /// - /// NOTE: use with extra care; this function is made public only be used for certain modules + /// NOTE: use with extra care; this function is made public only be used for certain pallets /// that need it. A runtime that does not have dynamic calls should never need this and should /// stick to static weights. A typical use case for this is inner calls or smart contract calls. /// Furthermore, it only makes sense to use this when it is presumably _cheap_ to provide the @@ -991,7 +1179,7 @@ impl Module { /// /// Another potential use-case could be for the `on_initialize` and `on_finalize` hooks. pub fn register_extra_weight_unchecked(weight: Weight, class: DispatchClass) { - BlockWeight::mutate(|current_weight| { + BlockWeight::::mutate(|current_weight| { current_weight.add(weight, class); }); } @@ -1000,50 +1188,60 @@ impl Module { pub fn initialize( number: &T::BlockNumber, parent_hash: &T::Hash, - txs_root: &T::Hash, digest: &DigestOf, kind: InitKind, ) { // populate environment - ExecutionPhase::put(Phase::Initialization); + ExecutionPhase::::put(Phase::Initialization); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); >::put(number); >::put(digest); >::put(parent_hash); >::insert(*number - One::one(), parent_hash); - >::put(txs_root); // Remove previous block data from storage - BlockWeight::kill(); + BlockWeight::::kill(); // Kill inspectable storage entries in state when `InitKind::Full`. 
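// Hypothetical sketch of how another pallet would use the new two-sided reference
// counting instead of the deprecated `inc_ref`/`dec_ref` pair: a pallet that *endows*
// an account takes a provider reference (letting the account exist), while a pallet
// that merely stores data keyed by an existing account takes a consumer reference
// (keeping it from being reaped). `T` and `who` are assumed context, not part of this diff.
use frame_system::{Config, DecRefError, IncRefError, IncRefStatus, Pallet};

fn endow_account<T: Config>(who: &T::AccountId) -> IncRefStatus {
    // Creates the account if it did not exist yet.
    Pallet::<T>::inc_providers(who)
}

fn remove_endowment<T: Config>(who: &T::AccountId) -> Result<(), DecRefError> {
    // Fails with `ConsumerRemaining` while something still depends on the account.
    Pallet::<T>::dec_providers(who).map(|_| ())
}

fn lock_account<T: Config>(who: &T::AccountId) -> Result<(), IncRefError> {
    // Fails with `NoProviders` if nothing currently provides for the account.
    Pallet::<T>::inc_consumers(who)
}

fn unlock_account<T: Config>(who: &T::AccountId) {
    // Must be called exactly once per successful `inc_consumers`.
    Pallet::<T>::dec_consumers(who);
}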
if let InitKind::Full = kind { >::kill(); - EventCount::kill(); + EventCount::::kill(); >::remove_all(); } } - /// Remove temporary "environment" entries in storage. + /// Remove temporary "environment" entries in storage, compute the storage root and return the + /// resulting header for this block. pub fn finalize() -> T::Header { - ExecutionPhase::kill(); - ExtrinsicCount::kill(); - AllExtrinsicsLen::kill(); + ExecutionPhase::::kill(); + AllExtrinsicsLen::::kill(); - let number = >::take(); - let parent_hash = >::take(); - let mut digest = >::take(); - let extrinsics_root = >::take(); + // The following fields + // + // - > + // - > + // - > + // - > + // - > + // - > + // + // stay to be inspected by the client and will be cleared by `Self::initialize`. + let number = >::get(); + let parent_hash = >::get(); + let mut digest = >::get(); + + let extrinsics = (0..ExtrinsicCount::::take().unwrap_or_default()) + .map(ExtrinsicData::::take) + .collect(); + let extrinsics_root = extrinsics_data_root::(extrinsics); // move block hash pruning window by one block - let block_hash_count = ::get(); - if number > block_hash_count { - let to_remove = number - block_hash_count - One::one(); + let block_hash_count = T::BlockHashCount::get(); + let to_remove = number.saturating_sub(block_hash_count).saturating_sub(One::one()); - // keep genesis hash - if to_remove != Zero::zero() { - >::remove(to_remove); - } + // keep genesis hash + if !to_remove.is_zero() { + >::remove(to_remove); } let storage_root = T::Hash::decode(&mut &sp_io::storage::root()[..]) @@ -1060,14 +1258,6 @@ impl Module { digest.push(item); } - // The following fields - // - // - > - // - > - // - > - // - // stay to be inspected by the client and will be cleared by `Self::initialize`. - ::new(number, extrinsics_root, storage_root, parent_hash, digest) } @@ -1081,7 +1271,7 @@ impl Module { >::append(item); } - /// Get the basic externalities for this module, useful for tests. + /// Get the basic externalities for this pallet, useful for tests. #[cfg(any(feature = "std", test))] pub fn externalities() -> TestExternalities { TestExternalities::new(sp_core::storage::Storage { @@ -1116,11 +1306,11 @@ impl Module { /// Set the current block weight. This should only be used in some integration tests. #[cfg(any(feature = "std", test))] - pub fn set_block_limits(weight: Weight, len: usize) { - BlockWeight::mutate(|current_weight| { - current_weight.put(weight, DispatchClass::Normal) + pub fn set_block_consumed_resources(weight: Weight, len: usize) { + BlockWeight::::mutate(|current_weight| { + current_weight.set(weight, DispatchClass::Normal) }); - AllExtrinsicsLen::put(len as u32); + AllExtrinsicsLen::::put(len as u32); } /// Reset events. Can be used as an alternative to @@ -1128,7 +1318,7 @@ impl Module { #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] pub fn reset_events() { >::kill(); - EventCount::kill(); + EventCount::::kill(); >::remove_all(); } @@ -1145,14 +1335,12 @@ impl Module { Account::::mutate(who, |a| a.nonce += T::Index::one()); } - /// Note what the extrinsic data of the current extrinsic index is. If this - /// is called, then ensure `derive_extrinsics` is also called before - /// block-building is completed. + /// Note what the extrinsic data of the current extrinsic index is. /// - /// NOTE: This function is called only when the block is being constructed locally. - /// `execute_block` doesn't note any extrinsics. + /// This is required to be called before applying an extrinsic. 
The data will used + /// in [`Self::finalize`] to calculate the correct extrinsics root. pub fn note_extrinsic(encoded_xt: Vec) { - ExtrinsicData::insert(Self::extrinsic_index().unwrap_or_default(), encoded_xt); + ExtrinsicData::::insert(Self::extrinsic_index().unwrap_or_default(), encoded_xt); } /// To be called immediately after an extrinsic has been applied. @@ -1160,10 +1348,10 @@ impl Module { info.weight = extract_actual_weight(r, &info); Self::deposit_event( match r { - Ok(_) => RawEvent::ExtrinsicSuccess(info), + Ok(_) => Event::ExtrinsicSuccess(info), Err(err) => { sp_runtime::print(err); - RawEvent::ExtrinsicFailed(err.error, info) + Event::ExtrinsicFailed(err.error, info) }, } ); @@ -1171,7 +1359,7 @@ impl Module { let next_extrinsic_index = Self::extrinsic_index().unwrap_or_default() + 1u32; storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &next_extrinsic_index); - ExecutionPhase::put(Phase::ApplyExtrinsic(next_extrinsic_index)); + ExecutionPhase::::put(Phase::ApplyExtrinsic(next_extrinsic_index)); } /// To be called immediately after `note_applied_extrinsic` of the last extrinsic of the block @@ -1179,52 +1367,26 @@ impl Module { pub fn note_finished_extrinsics() { let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX) .unwrap_or_default(); - ExtrinsicCount::put(extrinsic_index); - ExecutionPhase::put(Phase::Finalization); + ExtrinsicCount::::put(extrinsic_index); + ExecutionPhase::::put(Phase::Finalization); } /// To be called immediately after finishing the initialization of the block - /// (e.g., called `on_initialize` for all modules). + /// (e.g., called `on_initialize` for all pallets). pub fn note_finished_initialize() { - ExecutionPhase::put(Phase::ApplyExtrinsic(0)) - } - - /// Remove all extrinsic data and save the extrinsics trie root. - pub fn derive_extrinsics() { - let extrinsics = (0..ExtrinsicCount::get().unwrap_or_default()) - .map(ExtrinsicData::take).collect(); - let xts_root = extrinsics_data_root::(extrinsics); - >::put(xts_root); + ExecutionPhase::::put(Phase::ApplyExtrinsic(0)) } /// An account is being created. - pub fn on_created_account(who: T::AccountId) { + pub fn on_created_account(who: T::AccountId, _a: &mut AccountInfo) { T::OnNewAccount::on_new_account(&who); - Self::deposit_event(RawEvent::NewAccount(who)); + Self::deposit_event(Event::NewAccount(who)); } /// Do anything that needs to be done after an account has been killed. fn on_killed_account(who: T::AccountId) { T::OnKilledAccount::on_killed_account(&who); - Self::deposit_event(RawEvent::KilledAccount(who)); - } - - /// Remove an account from storage. This should only be done when its refs are zero or you'll - /// get storage leaks in other modules. Nonetheless we assume that the calling logic knows best. - /// - /// This is a no-op if the account doesn't already exist. If it does then it will ensure - /// cleanups (those in `on_killed_account`) take place. - fn kill_account(who: &T::AccountId) { - if Account::::contains_key(who) { - let account = Account::::take(who); - if account.refcount > 0 { - debug::debug!( - target: "system", - "WARNING: Referenced account deleted. This is probably a bug." - ); - } - } - Module::::on_killed_account(who.clone()); + Self::deposit_event(Event::KilledAccount(who)); } /// Determine whether or not it is possible to update the code. @@ -1250,84 +1412,84 @@ impl Module { } } -/// Event handler which calls on_created_account when it happens. 
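// Condensed, hypothetical sketch of the block-authoring flow after this change: the
// extrinsics root is no longer passed into `initialize` (and `derive_extrinsics` is
// gone); `finalize` now computes it from the noted `ExtrinsicData`. Block number 1,
// an empty digest and a single fake encoded extrinsic are placeholders for exposition.
use frame_system::{Config, InitKind, Pallet};
use sp_runtime::traits::One;

fn author_block_skeleton<T: Config>(parent_hash: T::Hash) -> T::Header {
    let number = T::BlockNumber::one();
    let digest = Default::default();

    // Note: no `txs_root` argument any more.
    Pallet::<T>::initialize(&number, &parent_hash, &digest, InitKind::Full);
    Pallet::<T>::note_finished_initialize();

    // For every extrinsic: record its encoded bytes before applying it, so that
    // `finalize` can build the extrinsics trie root from `ExtrinsicData`.
    Pallet::<T>::note_extrinsic(b"fake encoded extrinsic".to_vec());
    // ... apply the extrinsic and call `note_applied_extrinsic` with its result ...

    Pallet::<T>::note_finished_extrinsics();

    // Computes the extrinsics root and storage root and returns the header.
    Pallet::<T>::finalize()
}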
-pub struct CallOnCreatedAccount(PhantomData); -impl Happened for CallOnCreatedAccount { - fn happened(who: &T::AccountId) { - Module::::on_created_account(who.clone()); +/// Event handler which registers a provider when created. +pub struct Provider(PhantomData); +impl HandleLifetime for Provider { + fn created(t: &T::AccountId) -> Result<(), StoredMapError> { + Module::::inc_providers(t); + Ok(()) + } + fn killed(t: &T::AccountId) -> Result<(), StoredMapError> { + Module::::dec_providers(t) + .map(|_| ()) + .or_else(|e| match e { + DecRefError::ConsumerRemaining => Err(StoredMapError::ConsumerRemaining), + }) } } -/// Event handler which calls kill_account when it happens. -pub struct CallKillAccount(PhantomData); -impl Happened for CallKillAccount { - fn happened(who: &T::AccountId) { - Module::::kill_account(who) +/// Event handler which registers a consumer when created. +pub struct Consumer(PhantomData); +impl HandleLifetime for Consumer { + fn created(t: &T::AccountId) -> Result<(), StoredMapError> { + Module::::inc_consumers(t) + .map_err(|e| match e { + IncRefError::NoProviders => StoredMapError::NoProviders + }) + } + fn killed(t: &T::AccountId) -> Result<(), StoredMapError> { + Module::::dec_consumers(t); + Ok(()) } } -impl BlockNumberProvider for Module +impl BlockNumberProvider for Pallet { - type BlockNumber = ::BlockNumber; + type BlockNumber = ::BlockNumber; fn current_block_number() -> Self::BlockNumber { - Module::::block_number() + Pallet::::block_number() } } -// Implement StoredMap for a simple single-item, kill-account-on-remove system. This works fine for -// storing a single item which is required to not be empty/default for the account to exist. -// Anything more complex will need more sophisticated logic. -impl StoredMap for Module { +fn is_providing(d: &T) -> bool { + d != &T::default() +} + +/// Implement StoredMap for a simple single-item, provide-when-not-default system. This works fine +/// for storing a single item which allows the account to continue existing as long as it's not +/// empty/default. +/// +/// Anything more complex will need more sophisticated logic. 
+impl StoredMap for Pallet { fn get(k: &T::AccountId) -> T::AccountData { Account::::get(k).data } - fn is_explicit(k: &T::AccountId) -> bool { - Account::::contains_key(k) - } - fn insert(k: &T::AccountId, data: T::AccountData) { - let existed = Account::::contains_key(k); - Account::::mutate(k, |a| a.data = data); - if !existed { - Self::on_created_account(k.clone()); - } - } - fn remove(k: &T::AccountId) { - Self::kill_account(k) - } - fn mutate(k: &T::AccountId, f: impl FnOnce(&mut T::AccountData) -> R) -> R { - let existed = Account::::contains_key(k); - let r = Account::::mutate(k, |a| f(&mut a.data)); - if !existed { - Self::on_created_account(k.clone()); - } - r - } - fn mutate_exists(k: &T::AccountId, f: impl FnOnce(&mut Option) -> R) -> R { - Self::try_mutate_exists(k, |x| -> Result { Ok(f(x)) }).expect("Infallible; qed") - } - fn try_mutate_exists(k: &T::AccountId, f: impl FnOnce(&mut Option) -> Result) -> Result { - Account::::try_mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - let (maybe_prefix, mut maybe_data) = split_inner( - maybe_value.take(), - |account| ((account.nonce, account.refcount), account.data) - ); - f(&mut maybe_data).map(|result| { - *maybe_value = maybe_data.map(|data| { - let (nonce, refcount) = maybe_prefix.unwrap_or_default(); - AccountInfo { nonce, refcount, data } - }); - (existed, maybe_value.is_some(), result) - }) - }).map(|(existed, exists, v)| { - if !existed && exists { - Self::on_created_account(k.clone()); - } else if existed && !exists { - Self::on_killed_account(k.clone()); + + fn try_mutate_exists>( + k: &T::AccountId, + f: impl FnOnce(&mut Option) -> Result, + ) -> Result { + let account = Account::::get(k); + let was_providing = is_providing(&account.data); + let mut some_data = if was_providing { Some(account.data) } else { None }; + let result = f(&mut some_data)?; + let is_providing = some_data.is_some(); + if !was_providing && is_providing { + Self::inc_providers(k); + } else if was_providing && !is_providing { + match Self::dec_providers(k) { + Err(DecRefError::ConsumerRemaining) => Err(StoredMapError::ConsumerRemaining)?, + Ok(DecRefStatus::Reaped) => return Ok(result), + Ok(DecRefStatus::Exists) => { + // Update value as normal... + } } - v - }) + } else if !was_providing && !is_providing { + return Ok(result) + } + Account::::mutate(k, |a| a.data = some_data.unwrap_or_default()); + Ok(result) } } @@ -1344,21 +1506,14 @@ pub fn split_inner(option: Option, splitter: impl FnOnce(T) -> (R, S } } - -impl IsDeadAccount for Module { - fn is_dead_account(who: &T::AccountId) -> bool { - !Account::::contains_key(who) - } -} - -pub struct ChainContext(sp_std::marker::PhantomData); +pub struct ChainContext(PhantomData); impl Default for ChainContext { fn default() -> Self { - ChainContext(sp_std::marker::PhantomData) + ChainContext(PhantomData) } } -impl Lookup for ChainContext { +impl Lookup for ChainContext { type Source = ::Source; type Target = ::Target; @@ -1366,3 +1521,14 @@ impl Lookup for ChainContext { ::lookup(s) } } + +/// Prelude to be used alongside pallet macro, for ease of use. +pub mod pallet_prelude { + pub use crate::{ensure_signed, ensure_none, ensure_root}; + + /// Type alias for the `Origin` associated type of system config. + pub type OriginFor = ::Origin; + + /// Type alias for the `BlockNumber` associated type of system config. 
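// Hypothetical sketch of using the reworked `StoredMap` implementation: account data
// now "provides" for the account whenever it is non-default, so writing `Some(..)`
// keeps (or creates) a provider reference and writing `None` drops it, failing with
// `ConsumerRemaining` if consumers still exist. `T`, `who` and the closure body are
// assumed context for illustration only.
use frame_support::traits::StoredMap;
use sp_runtime::traits::StoredMapError;

fn replace_account_data<T: frame_system::Config>(
    who: &T::AccountId,
    new_data: T::AccountData,
) -> Result<(), StoredMapError> {
    <frame_system::Pallet<T> as StoredMap<T::AccountId, T::AccountData>>::try_mutate_exists(
        who,
        |maybe_data| -> Result<(), StoredMapError> {
            // `Some(..)` keeps the account providing for itself; `None` would drop
            // the provider reference and may reap the account.
            *maybe_data = Some(new_data);
            Ok(())
        },
    )
}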
+ pub type BlockNumberFor = ::BlockNumber; +} diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs new file mode 100644 index 0000000000000..c24d671cdd7ab --- /dev/null +++ b/frame/system/src/limits.rs @@ -0,0 +1,434 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Block resource limits configuration structures. +//! +//! FRAME defines two resources that are limited within a block: +//! - Weight (execution cost/time) +//! - Length (block size) +//! +//! `frame_system` tracks consumption of each of these resources separately for each +//! `DispatchClass`. This module contains configuration object for both resources, +//! which should be passed to `frame_system` configuration when runtime is being set up. + +use frame_support::weights::{Weight, DispatchClass, constants, PerDispatchClass, OneOrMany}; +use sp_runtime::{RuntimeDebug, Perbill}; + +/// Block length limit configuration. +#[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode)] +pub struct BlockLength { + /// Maximal total length in bytes for each extrinsic class. + /// + /// In the worst case, the total block length is going to be: + /// `MAX(max)` + pub max: PerDispatchClass, +} + +impl Default for BlockLength { + fn default() -> Self { + BlockLength::max_with_normal_ratio( + 5 * 1024 * 1024, + DEFAULT_NORMAL_RATIO, + ) + } +} + +impl BlockLength { + /// Create new `BlockLength` with `max` for every class. + pub fn max(max: u32) -> Self { + Self { + max: PerDispatchClass::new(|_| max), + } + } + + /// Create new `BlockLength` with `max` for `Operational` & `Mandatory` + /// and `normal * max` for `Normal`. + pub fn max_with_normal_ratio(max: u32, normal: Perbill) -> Self { + Self { + max: PerDispatchClass::new(|class| if class == DispatchClass::Normal { + normal * max + } else { + max + }), + } + } +} + +#[derive(Default, RuntimeDebug)] +pub struct ValidationErrors { + pub has_errors: bool, + #[cfg(feature = "std")] + pub errors: Vec, +} + +macro_rules! error_assert { + ($cond : expr, $err : expr, $format : expr $(, $params: expr )*$(,)*) => { + if !$cond { + $err.has_errors = true; + #[cfg(feature = "std")] + { $err.errors.push(format!($format $(, &$params )*)); } + } + } +} + +/// A result of validating `BlockWeights` correctness. +pub type ValidationResult = Result; + +/// A ratio of `Normal` dispatch class within block, used as default value for +/// `BlockWeight` and `BlockLength`. The `Default` impls are provided mostly for convenience +/// to use in tests. +const DEFAULT_NORMAL_RATIO: Perbill = Perbill::from_percent(75); + +/// `DispatchClass`-specific weight configuration. +#[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode)] +pub struct WeightsPerClass { + /// Base weight of single extrinsic of given class. + pub base_extrinsic: Weight, + /// Maximal weight of single extrinsic. Should NOT include `base_extrinsic` cost. 
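// Illustrative aside (not part of the diff): how `BlockLength::max_with_normal_ratio`
// splits the byte budget per dispatch class, modelled with plain integers. The 5 MiB
// and 75% figures come from the `Default` impl and `DEFAULT_NORMAL_RATIO` above; the
// helper function itself is hypothetical and only mirrors the arithmetic.

fn length_limits(max: u32, normal_ratio_percent: u32) -> (u32, u32, u32) {
    let normal = (max as u64 * normal_ratio_percent as u64 / 100) as u32;
    // (Normal, Operational, Mandatory): only `Normal` is scaled down.
    (normal, max, max)
}

fn main() {
    let (normal, operational, mandatory) = length_limits(5 * 1024 * 1024, 75);
    assert_eq!(normal, 3_932_160);      // 75% of 5 MiB
    assert_eq!(operational, 5_242_880); // full 5 MiB
    assert_eq!(mandatory, 5_242_880);
}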
+ /// + /// `None` indicates that this class of extrinsics doesn't have a limit. + pub max_extrinsic: Option, + /// Block maximal total weight for all extrinsics of given class. + /// + /// `None` indicates that weight sum of this class of extrinsics is not + /// restricted. Use this value carefully, since it might produce heavily oversized + /// blocks. + /// + /// In the worst case, the total weight consumed by the class is going to be: + /// `MAX(max_total) + MAX(reserved)`. + pub max_total: Option, + /// Block reserved allowance for all extrinsics of a particular class. + /// + /// Setting to `None` indicates that extrinsics of that class are allowed + /// to go over total block weight (but at most `max_total` for that class). + /// Setting to `Some(x)` guarantees that at least `x` weight of particular class + /// is processed in every block. + pub reserved: Option, +} + +/// Block weight limits & base values configuration. +/// +/// This object is responsible for defining weight limits and base weight values tracked +/// during extrinsic execution. +/// +/// Each block starts with `base_block` weight being consumed right away. Next up the +/// `on_initialize` pallet callbacks are invoked and their cost is added before any extrinsic +/// is executed. This cost is tracked as `Mandatory` dispatch class. +/// +/// | | `max_block` | | +/// | | | | +/// | | | | +/// | | | | +/// | | | #| `on_initialize` +/// | #| `base_block` | #| +/// |NOM| |NOM| +/// ||\_ Mandatory +/// |\__ Operational +/// \___ Normal +/// +/// The remaining capacity can be used to dispatch extrinsics. Note that each dispatch class +/// is being tracked separately, but the sum can't exceed `max_block` (except for `reserved`). +/// Below you can see a picture representing full block with 3 extrinsics (two `Operational` and +/// one `Normal`). Each class has it's own limit `max_total`, but also the sum cannot exceed +/// `max_block` value. +/// -- `Mandatory` limit (unlimited) +/// | # | | | +/// | # | `Ext3` | - - `Operational` limit +/// |# | `Ext2` |- - `Normal` limit +/// | # | `Ext1` | # | +/// | #| `on_initialize` | ##| +/// | #| `base_block` |###| +/// |NOM| |NOM| +/// +/// It should be obvious now that it's possible for one class to reach it's limit (say `Normal`), +/// while the block has still capacity to process more transactions (`max_block` not reached, +/// `Operational` transactions can still go in). Setting `max_total` to `None` disables the +/// per-class limit. This is generally highly recommended for `Mandatory` dispatch class, while it +/// can be dangerous for `Normal` class and should only be done with extra care and consideration. +/// +/// Often it's desirable for some class of transactions to be added to the block despite it being +/// full. For instance one might want to prevent high-priority `Normal` transactions from pushing +/// out lower-priority `Operational` transactions. In such cases you might add a `reserved` capacity +/// for given class. +/// _ +/// # \ +/// # `Ext8` - `reserved` +/// # _/ +/// | # | `Ext7 | - - `Operational` limit +/// |# | `Ext6` | | +/// |# | `Ext5` |-# - `Normal` limit +/// |# | `Ext4` |## | +/// | #| `on_initialize` |###| +/// | #| `base_block` |###| +/// |NOM| |NOM| +/// +/// In the above example, `Ext4-6` fill up the block almost up to `max_block`. `Ext7` would not fit +/// if there wasn't the extra `reserved` space for `Operational` transactions. Note that `max_total` +/// limit applies to `reserved` space as well (i.e. 
the sum of weights of `Ext7` & `Ext8` mustn't +/// exceed it). Setting `reserved` to `None` allows the extrinsics to always get into the block up +/// to their `max_total` limit. If `max_total` is set to `None` as well, all extrinsics witch +/// dispatchables of given class will always end up in the block (recommended for `Mandatory` +/// dispatch class). +/// +/// As a consequence of `reserved` space, total consumed block weight might exceed `max_block` +/// value, so this parameter should rather be thought of as "target block weight" than a hard limit. +#[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode)] +pub struct BlockWeights { + /// Base weight of block execution. + pub base_block: Weight, + /// Maximal total weight consumed by all kinds of extrinsics (without `reserved` space). + pub max_block: Weight, + /// Weight limits for extrinsics of given dispatch class. + pub per_class: PerDispatchClass, +} + +impl Default for BlockWeights { + fn default() -> Self { + Self::with_sensible_defaults( + 1 * constants::WEIGHT_PER_SECOND, + DEFAULT_NORMAL_RATIO, + ) + } +} + +impl BlockWeights { + /// Get per-class weight settings. + pub fn get(&self, class: DispatchClass) -> &WeightsPerClass { + self.per_class.get(class) + } + + /// Verifies correctness of this `BlockWeights` object. + pub fn validate(self) -> ValidationResult { + fn or_max(w: Option) -> Weight { + w.unwrap_or_else(|| Weight::max_value()) + } + let mut error = ValidationErrors::default(); + + for class in DispatchClass::all() { + let weights = self.per_class.get(*class); + let max_for_class = or_max(weights.max_total); + let base_for_class = weights.base_extrinsic; + let reserved = or_max(weights.reserved); + // Make sure that if total is set it's greater than base_block && + // base_for_class + error_assert!( + (max_for_class > self.base_block && max_for_class > base_for_class) + || max_for_class == 0, + &mut error, + "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", + class, max_for_class, self.base_block, base_for_class, + ); + // Max extrinsic can't be greater than max_for_class. + error_assert!( + weights.max_extrinsic.unwrap_or(0) <= max_for_class.saturating_sub(base_for_class), + &mut error, + "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", + class, weights.max_extrinsic, + max_for_class.saturating_sub(base_for_class), + ); + // Max extrinsic should not be 0 + error_assert!( + weights.max_extrinsic.unwrap_or_else(|| Weight::max_value()) > 0, + &mut error, + "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", + class, weights.max_extrinsic, + ); + // Make sure that if reserved is set it's greater than base_for_class. + error_assert!( + reserved > base_for_class || reserved == 0, + &mut error, + "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", + class, reserved, base_for_class, + ); + // Make sure max block is greater than max_total if it's set. + error_assert!( + self.max_block >= weights.max_total.unwrap_or(0), + &mut error, + "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", + class, self.max_block, weights.max_total, + ); + // Make sure we can fit at least one extrinsic. 
+ error_assert!( + self.max_block > base_for_class + self.base_block, + &mut error, + "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", + class, self.max_block, base_for_class + self.base_block, + ); + } + + if error.has_errors { + Err(error) + } else { + Ok(self) + } + } + + /// Create new weights definition, with both `Normal` and `Operational` + /// classes limited to given weight. + /// + /// Note there is no reservation for `Operational` class, so this constructor + /// is not suitable for production deployments. + pub fn simple_max(block_weight: Weight) -> Self { + Self::builder() + .base_block(0) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = 0; + }) + .for_class(DispatchClass::non_mandatory(), |weights| { + weights.max_total = block_weight.into(); + }) + .build() + .expect("We only specify max_total and leave base values as defaults; qed") + } + + /// Create a sensible default weights system given only expected maximal block weight and the + /// ratio that `Normal` extrinsics should occupy. + /// + /// Assumptions: + /// - Average block initialization is assumed to be `10%`. + /// - `Operational` transactions have reserved allowance (`1.0 - normal_ratio`) + pub fn with_sensible_defaults( + expected_block_weight: Weight, + normal_ratio: Perbill, + ) -> Self { + let normal_weight = normal_ratio * expected_block_weight; + Self::builder() + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = normal_weight.into(); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = expected_block_weight.into(); + weights.reserved = (expected_block_weight - normal_weight).into(); + }) + .avg_block_initialization(Perbill::from_percent(10)) + .build() + .expect("Sensible defaults are tested to be valid; qed") + } + + /// Start constructing new `BlockWeights` object. + /// + /// By default all kinds except of `Mandatory` extrinsics are disallowed. + pub fn builder() -> BlockWeightsBuilder { + BlockWeightsBuilder { + weights: BlockWeights { + base_block: constants::BlockExecutionWeight::get(), + max_block: 0, + per_class: PerDispatchClass::new(|class| { + let initial = if class == DispatchClass::Mandatory { None } else { Some(0) }; + WeightsPerClass { + base_extrinsic: constants::ExtrinsicBaseWeight::get(), + max_extrinsic: None, + max_total: initial, + reserved: initial, + } + }), + }, + init_cost: None, + } + } +} + +/// An opinionated builder for `Weights` object. +pub struct BlockWeightsBuilder { + weights: BlockWeights, + init_cost: Option, +} + +impl BlockWeightsBuilder { + /// Set base block weight. + pub fn base_block(mut self, base_block: Weight) -> Self { + self.weights.base_block = base_block; + self + } + + /// Average block initialization weight cost. + /// + /// This value is used to derive maximal allowed extrinsic weight for each + /// class, based on the allowance. + /// + /// This is to make sure that extrinsics don't stay forever in the pool, + /// because they could seamingly fit the block (since they are below `max_block`), + /// but the cost of calling `on_initialize` alway prevents them from being included. + pub fn avg_block_initialization(mut self, init_cost: Perbill) -> Self { + self.init_cost = Some(init_cost); + self + } + + /// Set parameters for particular class. + /// + /// Note: `None` values of `max_extrinsic` will be overwritten in `build` in case + /// `avg_block_initialization` rate is set to a non-zero value. 
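// Illustrative aside (not part of the diff): the arithmetic performed by
// `BlockWeights::with_sensible_defaults` and the builder's `avg_block_initialization`
// step, restated with plain integers. The concrete numbers (a 2_000_000_000 weight
// budget, 75% normal ratio, 10% init cost, 125_000_000 base extrinsic weight) are
// made up for the example; only the relationships mirror the code above.

fn main() {
    let max_block: u64 = 2_000_000_000;
    let normal_ratio = 75; // percent
    let avg_init = 10;     // percent of the block assumed spent in `on_initialize`
    let base_extrinsic: u64 = 125_000_000;

    // `with_sensible_defaults`: Normal gets the ratio, Operational may use the whole
    // block and additionally keeps a reserved slice equal to what Normal cannot use.
    let normal_max_total = max_block * normal_ratio / 100;
    let operational_max_total = max_block;
    let operational_reserved = max_block - normal_max_total;

    // `avg_block_initialization`: the expected `on_initialize` cost and the base
    // extrinsic cost are subtracted from the largest single extrinsic allowed.
    let init_weight = max_block * avg_init / 100;
    let normal_max_extrinsic =
        normal_max_total.saturating_sub(init_weight).saturating_sub(base_extrinsic);

    assert_eq!(normal_max_total, 1_500_000_000);
    assert_eq!(operational_max_total, 2_000_000_000);
    assert_eq!(operational_reserved, 500_000_000);
    assert_eq!(normal_max_extrinsic, 1_175_000_000);
}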
+ pub fn for_class( + mut self, + class: impl OneOrMany, + action: impl Fn(&mut WeightsPerClass), + ) -> Self { + for class in class.into_iter() { + action(self.weights.per_class.get_mut(class)); + } + self + } + + /// Construct the `BlockWeights` object. + pub fn build(self) -> ValidationResult { + // compute max extrinsic size + let Self { mut weights, init_cost } = self; + + // compute max block size. + for class in DispatchClass::all() { + weights.max_block = match weights.per_class.get(*class).max_total { + Some(max) if max > weights.max_block => max, + _ => weights.max_block, + }; + } + // compute max size of single extrinsic + if let Some(init_weight) = init_cost.map(|rate| rate * weights.max_block) { + for class in DispatchClass::all() { + let per_class = weights.per_class.get_mut(*class); + if per_class.max_extrinsic.is_none() && init_cost.is_some() { + per_class.max_extrinsic = per_class.max_total + .map(|x| x.saturating_sub(init_weight)) + .map(|x| x.saturating_sub(per_class.base_extrinsic)); + } + } + } + + // Validate the result + weights.validate() + } + + /// Construct the `BlockWeights` object or panic if it's invalid. + /// + /// This is a convenience method to be called whenever you construct a runtime. + pub fn build_or_panic(self) -> BlockWeights { + self.build().expect( + "Builder finished with `build_or_panic`; The panic is expected if runtime weights are not correct" + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn default_weights_are_valid() { + BlockWeights::default() + .validate() + .unwrap(); + } +} diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index cd67a74114073..2b31929b5da81 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,31 +15,33 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::*; +use crate::{self as frame_system, *}; use sp_std::cell::RefCell; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, - testing::Header, -}; -use frame_support::{ - impl_outer_origin, parameter_types, - weights::PostDispatchInfo, + testing::Header, BuildStorage, }; +use frame_support::parameter_types; -impl_outer_origin! { - pub enum Origin for Test where system = super {} -} +type UncheckedExtrinsic = mocking::MockUncheckedExtrinsic; +type Block = mocking::MockBlock; -#[derive(Clone, Eq, PartialEq, Debug, Default)] -pub struct Test; +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + } +); + +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +const MAX_BLOCK_WEIGHT: Weight = 1024; parameter_types! { pub const BlockHashCount: u64 = 10; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumExtrinsicWeight: Weight = 768; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - pub const MaximumBlockLength: u32 = 1024; pub Version: RuntimeVersion = RuntimeVersion { spec_name: sp_version::create_runtime_str!("test"), impl_name: sp_version::create_runtime_str!("system-test"), @@ -49,12 +51,28 @@ parameter_types! 
{ apis: sp_version::create_apis_vec!([]), transaction_version: 1, }; - pub const BlockExecutionWeight: Weight = 10; - pub const ExtrinsicBaseWeight: Weight = 5; pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, write: 100, }; + pub RuntimeBlockWeights: limits::BlockWeights = limits::BlockWeights::builder() + .base_block(10) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = 5; + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAX_BLOCK_WEIGHT); + weights.reserved = Some( + MAX_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(Perbill::from_percent(0)) + .build_or_panic(); + pub RuntimeBlockLength: limits::BlockLength = + limits::BlockLength::max_with_normal_ratio(1024, NORMAL_DISPATCH_RATIO); } thread_local!{ @@ -66,22 +84,10 @@ impl OnKilledAccount for RecordKilled { fn on_killed_account(who: &u64) { KILLED.with(|r| r.borrow_mut().push(*who)) } } -#[derive(Debug, codec::Encode, codec::Decode)] -pub struct Call; - -impl Dispatchable for Call { - type Origin = Origin; - type Trait = (); - type Info = DispatchInfo; - type PostInfo = PostDispatchInfo; - fn dispatch(self, _origin: Self::Origin) - -> sp_runtime::DispatchResultWithInfo { - panic!("Do not use dummy implementation for dispatch."); - } -} - -impl Trait for Test { +impl Config for Test { type BaseCallFilter = (); + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; type Origin = Origin; type Call = Call; type Index = u64; @@ -91,34 +97,30 @@ impl Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = DbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = Version; - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = u32; type OnNewAccount = (); type OnKilledAccount = RecordKilled; type SystemWeightInfo = (); + type SS58Prefix = (); } -pub type System = Module; -pub type SysEvent = ::Event; +pub type SysEvent = frame_system::Event; -pub const CALL: &::Call = &Call; +/// A simple call, which one doesn't matter. +pub const CALL: &::Call = &Call::System(frame_system::Call::set_heap_pages(0u64)); /// Create new externalities for `System` module tests. pub fn new_test_ext() -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = GenesisConfig::default().build_storage::().unwrap().into(); + let mut ext: sp_io::TestExternalities = GenesisConfig::default() + .build_storage().unwrap().into(); // Add to each test the initial weight of a block ext.execute_with(|| System::register_extra_weight_unchecked( - ::BlockExecutionWeight::get(), + ::BlockWeights::get().base_block, DispatchClass::Mandatory )); ext diff --git a/frame/system/src/mocking.rs b/frame/system/src/mocking.rs new file mode 100644 index 0000000000000..9f80c59a9c4d2 --- /dev/null +++ b/frame/system/src/mocking.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Provide types to help defining a mock environment when testing pallets. + +use sp_runtime::generic; + +/// An unchecked extrinsic type to be used in tests. +pub type MockUncheckedExtrinsic = generic::UncheckedExtrinsic< + ::AccountId, ::Call, Signature, Extra, +>; + +/// An implementation of `sp_runtime::traits::Block` to be used in tests. +pub type MockBlock = generic::Block< + generic::Header<::BlockNumber, sp_runtime::traits::BlakeTwo256>, + MockUncheckedExtrinsic, +>; diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index ba5cfb8536f24..f2f446913c477 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -63,7 +63,7 @@ use sp_std::convert::{TryInto, TryFrom}; use sp_std::prelude::{Box, Vec}; use sp_runtime::app_crypto::RuntimeAppPublic; use sp_runtime::traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One}; -use frame_support::{debug, storage::StorageMap, RuntimeDebug}; +use frame_support::{debug, RuntimeDebug}; /// Marker struct used to flag using all supported keys to sign a payload. pub struct ForAll {} @@ -376,9 +376,6 @@ impl Clone for Account where /// The point of this trait is to be able to easily convert between `RuntimeAppPublic`, the wrapped /// (generic = non application-specific) crypto types and the `Public` type required by the runtime. /// -/// TODO [#5662] Potentially use `IsWrappedBy` types, or find some other way to make it easy to -/// obtain unwrapped crypto (and wrap it back). -/// /// Example (pseudo-)implementation: /// ```ignore /// // im-online specific crypto @@ -392,6 +389,8 @@ impl Clone for Account where /// type Public = MultiSigner: From; /// type Signature = MulitSignature: From; /// ``` +// TODO [#5662] Potentially use `IsWrappedBy` types, or find some other way to make it easy to +// obtain unwrapped crypto (and wrap it back). pub trait AppCrypto { /// A application-specific crypto. type RuntimeAppPublic: RuntimeAppPublic; @@ -446,9 +445,9 @@ pub trait AppCrypto { /// This trait adds extra bounds to `Public` and `Signature` types of the runtime /// that are necessary to use these types for signing. /// -/// TODO [#5663] Could this be just `T::Signature as traits::Verify>::Signer`? -/// Seems that this may cause issues with bounds resolution. -pub trait SigningTypes: crate::Trait { +// TODO [#5663] Could this be just `T::Signature as traits::Verify>::Signer`? +// Seems that this may cause issues with bounds resolution. +pub trait SigningTypes: crate::Config { /// A public key that is capable of identifing `AccountId`s. /// /// Usually that's either a raw crypto public key (e.g. 
`sr25519::Public`) or @@ -638,7 +637,7 @@ pub trait SignedPayload: Encode { mod tests { use super::*; use codec::Decode; - use crate::mock::{Test as TestRuntime, Call}; + use crate::mock::{Test as TestRuntime, Call, CALL}; use sp_core::offchain::{testing, TransactionPoolExt}; use sp_runtime::testing::{UintAuthorityId, TestSignature, TestXt}; @@ -709,7 +708,7 @@ mod tests { public: account.public.clone() }, |_payload, _signature| { - Call + CALL.clone() } ); @@ -750,7 +749,7 @@ mod tests { public: account.public.clone() }, |_payload, _signature| { - Call + CALL.clone() } ); @@ -788,7 +787,7 @@ mod tests { public: account.public.clone() }, |_payload, _signature| { - Call + CALL.clone() } ); @@ -828,7 +827,7 @@ mod tests { public: account.public.clone() }, |_payload, _signature| { - Call + CALL.clone() } ); diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 55286d951cc27..ca17edcf4b22a 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,8 +18,11 @@ use crate::*; use mock::{*, Origin}; use sp_core::H256; -use sp_runtime::DispatchError; -use frame_support::weights::WithPostDispatchInfo; +use sp_runtime::{DispatchError, DispatchErrorWithPostInfo, traits::{Header, BlakeTwo256}}; +use frame_support::{ + weights::WithPostDispatchInfo, + dispatch::PostDispatchInfo, +}; #[test] fn origin_works() { @@ -31,20 +34,22 @@ fn origin_works() { #[test] fn stored_map_works() { new_test_ext().execute_with(|| { - System::insert(&0, 42); - assert!(System::allow_death(&0)); + assert!(System::insert(&0, 42).is_ok()); + assert!(!System::is_provider_required(&0)); - System::inc_ref(&0); - assert!(!System::allow_death(&0)); + assert_eq!(Account::::get(0), AccountInfo { nonce: 0, providers: 1, consumers: 0, data: 42 }); - System::insert(&0, 69); - assert!(!System::allow_death(&0)); + assert!(System::inc_consumers(&0).is_ok()); + assert!(System::is_provider_required(&0)); - System::dec_ref(&0); - assert!(System::allow_death(&0)); + assert!(System::insert(&0, 69).is_ok()); + assert!(System::is_provider_required(&0)); + + System::dec_consumers(&0); + assert!(!System::is_provider_required(&0)); assert!(KILLED.with(|r| r.borrow().is_empty())); - System::kill_account(&0); + assert!(System::remove(&0).is_ok()); assert_eq!(KILLED.with(|r| r.borrow().clone()), vec![0u64]); }); } @@ -55,7 +60,6 @@ fn deposit_event_should_work() { System::initialize( &1, &[0u8; 32].into(), - &[0u8; 32].into(), &Default::default(), InitKind::Full, ); @@ -67,7 +71,7 @@ fn deposit_event_should_work() { vec![ EventRecord { phase: Phase::Finalization, - event: SysEvent::CodeUpdated, + event: SysEvent::CodeUpdated.into(), topics: vec![], } ] @@ -76,7 +80,6 @@ fn deposit_event_should_work() { System::initialize( &2, &[0u8; 32].into(), - &[0u8; 32].into(), &Default::default(), InitKind::Full, ); @@ -96,17 +99,17 @@ fn deposit_event_should_work() { vec![ EventRecord { phase: Phase::Initialization, - event: SysEvent::NewAccount(32), + event: SysEvent::NewAccount(32).into(), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(0), - event: SysEvent::KilledAccount(42), + event: SysEvent::KilledAccount(42).into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess(Default::default()), 
+ event: SysEvent::ExtrinsicSuccess(Default::default()).into(), topics: vec![] }, EventRecord { @@ -114,12 +117,12 @@ fn deposit_event_should_work() { event: SysEvent::ExtrinsicFailed( DispatchError::BadOrigin.into(), Default::default() - ), + ).into(), topics: vec![] }, EventRecord { phase: Phase::Finalization, - event: SysEvent::NewAccount(3), + event: SysEvent::NewAccount(3).into(), topics: vec![] }, ] @@ -133,7 +136,6 @@ fn deposit_event_uses_actual_weight() { System::initialize( &1, &[0u8; 32].into(), - &[0u8; 32].into(), &Default::default(), InitKind::Full, ); @@ -171,7 +173,7 @@ fn deposit_event_uses_actual_weight() { weight: 300, .. Default::default() }, - ), + ).into(), topics: vec![] }, EventRecord { @@ -181,7 +183,7 @@ fn deposit_event_uses_actual_weight() { weight: 1000, .. Default::default() }, - ), + ).into(), topics: vec![] }, EventRecord { @@ -191,7 +193,7 @@ fn deposit_event_uses_actual_weight() { weight: 1000, .. Default::default() }, - ), + ).into(), topics: vec![] }, EventRecord { @@ -202,7 +204,7 @@ fn deposit_event_uses_actual_weight() { weight: 999, .. Default::default() }, - ), + ).into(), topics: vec![] }, ] @@ -218,7 +220,6 @@ fn deposit_event_topics() { System::initialize( &BLOCK_NUMBER, &[0u8; 32].into(), - &[0u8; 32].into(), &Default::default(), InitKind::Full, ); @@ -231,9 +232,9 @@ fn deposit_event_topics() { ]; // We deposit a few events with different sets of topics. - System::deposit_event_indexed(&topics[0..3], SysEvent::NewAccount(1)); - System::deposit_event_indexed(&topics[0..1], SysEvent::NewAccount(2)); - System::deposit_event_indexed(&topics[1..2], SysEvent::NewAccount(3)); + System::deposit_event_indexed(&topics[0..3], SysEvent::NewAccount(1).into()); + System::deposit_event_indexed(&topics[0..1], SysEvent::NewAccount(2).into()); + System::deposit_event_indexed(&topics[1..2], SysEvent::NewAccount(3).into()); System::finalize(); @@ -243,17 +244,17 @@ fn deposit_event_topics() { vec![ EventRecord { phase: Phase::Finalization, - event: SysEvent::NewAccount(1), + event: SysEvent::NewAccount(1).into(), topics: topics[0..3].to_vec(), }, EventRecord { phase: Phase::Finalization, - event: SysEvent::NewAccount(2), + event: SysEvent::NewAccount(2).into(), topics: topics[0..1].to_vec(), }, EventRecord { phase: Phase::Finalization, - event: SysEvent::NewAccount(3), + event: SysEvent::NewAccount(3).into(), topics: topics[1..2].to_vec(), } ] @@ -284,7 +285,6 @@ fn prunes_block_hash_mappings() { System::initialize( &n, &[n as u8 - 1; 32].into(), - &[0u8; 32].into(), &Default::default(), InitKind::Full, ); @@ -332,7 +332,7 @@ fn set_code_checks_works() { ("test", 1, 2, Err(Error::::SpecVersionNeedsToIncrease)), ("test", 1, 1, Err(Error::::SpecVersionNeedsToIncrease)), ("test2", 1, 1, Err(Error::::InvalidSpecName)), - ("test", 2, 1, Ok(())), + ("test", 2, 1, Ok(PostDispatchInfo::default())), ("test", 0, 1, Err(Error::::SpecVersionNeedsToIncrease)), ("test", 1, 0, Err(Error::::SpecVersionNeedsToIncrease)), ]; @@ -354,7 +354,7 @@ fn set_code_checks_works() { vec![1, 2, 3, 4], ); - assert_eq!(expected.map_err(DispatchError::from), res); + assert_eq!(expected.map_err(DispatchErrorWithPostInfo::from), res); }); } } @@ -375,7 +375,7 @@ fn set_code_with_real_wasm_blob() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: SysEvent::CodeUpdated, + event: SysEvent::CodeUpdated.into(), topics: vec![], }], ); @@ -403,11 +403,12 @@ fn events_not_emitted_during_genesis() { new_test_ext().execute_with(|| { // Block Number is zero at genesis 
assert!(System::block_number().is_zero()); - System::on_created_account(Default::default()); + let mut account_data = AccountInfo { nonce: 0, consumers: 0, providers: 0, data: 0 }; + System::on_created_account(Default::default(), &mut account_data); assert!(System::events().is_empty()); // Events will be emitted starting on block 1 System::set_block_number(1); - System::on_created_account(Default::default()); + System::on_created_account(Default::default(), &mut account_data); assert!(System::events().len() == 1); }); } @@ -422,3 +423,28 @@ fn ensure_one_of_works() { assert_eq!(ensure_root_or_signed(RawOrigin::Signed(0)).unwrap(), Either::Right(0)); assert!(ensure_root_or_signed(RawOrigin::None).is_err()) } + +#[test] +fn extrinsics_root_is_calculated_correctly() { + new_test_ext().execute_with(|| { + System::initialize( + &1, + &[0u8; 32].into(), + &Default::default(), + InitKind::Full, + ); + System::note_finished_initialize(); + System::note_extrinsic(vec![1]); + System::note_applied_extrinsic(&Ok(().into()), Default::default()); + System::note_extrinsic(vec![2]); + System::note_applied_extrinsic( + &Err(DispatchError::BadOrigin.into()), + Default::default() + ); + System::note_finished_extrinsics(); + let header = System::finalize(); + + let ext_root = extrinsics_data_root::(vec![vec![1], vec![2]]); + assert_eq!(ext_root, *header.extrinsics_root()); + }); +} diff --git a/frame/system/src/weight.rs b/frame/system/src/weight.rs deleted file mode 100644 index 93295093c4fb8..0000000000000 --- a/frame/system/src/weight.rs +++ /dev/null @@ -1,76 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use codec::{Encode, Decode}; -use frame_support::weights::{Weight, DispatchClass}; -use sp_runtime::RuntimeDebug; - -/// An object to track the currently used extrinsic weight in a block. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] -pub struct ExtrinsicsWeight { - normal: Weight, - operational: Weight, -} - -impl ExtrinsicsWeight { - /// Returns the total weight consumed by all extrinsics in the block. - pub fn total(&self) -> Weight { - self.normal.saturating_add(self.operational) - } - - /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. - pub fn add(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_add(weight); - } - - /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would - /// occur. - pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { - let value = self.get_mut(class); - *value = value.checked_add(weight).ok_or(())?; - Ok(()) - } - - /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of - /// `Weight`. 
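// Illustrative aside (not part of the diff): a standalone model of the per-class
// accumulator that the removed `ExtrinsicsWeight` struct implemented (Mandatory was
// folded into the Normal bucket). The replacement in this PR tracks consumption per
// `DispatchClass` via `PerDispatchClass`; the struct below is only a sketch.

#[derive(Clone, Copy, Debug, PartialEq)]
enum Class { Normal, Operational, Mandatory }

#[derive(Default, Debug)]
struct TrackedWeight { normal: u64, operational: u64 }

impl TrackedWeight {
    fn bucket(&mut self, class: Class) -> &mut u64 {
        match class {
            Class::Operational => &mut self.operational,
            Class::Normal | Class::Mandatory => &mut self.normal,
        }
    }
    /// Saturating accumulation, used once an extrinsic has already been applied.
    fn add(&mut self, weight: u64, class: Class) {
        let b = self.bucket(class);
        *b = b.saturating_add(weight);
    }
    /// Checked accumulation, used when deciding whether an extrinsic still fits.
    fn checked_add(&mut self, weight: u64, class: Class) -> Result<(), ()> {
        let b = self.bucket(class);
        *b = b.checked_add(weight).ok_or(())?;
        Ok(())
    }
    fn total(&self) -> u64 {
        self.normal.saturating_add(self.operational)
    }
}

fn main() {
    let mut used = TrackedWeight::default();
    used.add(10, Class::Mandatory);               // e.g. `on_initialize` cost
    used.checked_add(25, Class::Normal).unwrap(); // a normal extrinsic that fits
    used.add(5, Class::Operational);
    assert_eq!(used.total(), 40);
}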
- pub fn sub(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_sub(weight); - } - - /// Get the current weight of a specific dispatch class. - pub fn get(&self, class: DispatchClass) -> Weight { - match class { - DispatchClass::Operational => self.operational, - DispatchClass::Normal | DispatchClass::Mandatory => self.normal, - } - } - - /// Get a mutable reference to the current weight of a specific dispatch class. - fn get_mut(&mut self, class: DispatchClass) -> &mut Weight { - match class { - DispatchClass::Operational => &mut self.operational, - DispatchClass::Normal | DispatchClass::Mandatory => &mut self.normal, - } - } - - /// Set the weight of a specific dispatch class. - pub fn put(&mut self, new: Weight, class: DispatchClass) { - *self.get_mut(class) = new; - } -} diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 5f3c84deb41c7..f28e90b34c38b 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -54,7 +54,7 @@ pub trait WeightInfo { /// Weights for frame_system using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn remark(_b: u32, ) -> Weight { (1_973_000 as Weight) } diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index 5a99c5d02c5af..f4f7bbda0f885 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-timestamp" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,20 +16,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io", optional = true } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } -impl-trait-for-tuples = "0.1.3" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io", optional = true } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-inherents = { version = "3.0.0", default-features = false, path = "../../primitives/inherents" } +frame-benchmarking = { version = "3.0.0", default-features = false, 
path = "../benchmarking", optional = true } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-timestamp = { version = "3.0.0", default-features = false, path = "../../primitives/timestamp" } +impl-trait-for-tuples = "0.2.1" [dev-dependencies] -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io ={ version = "3.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/timestamp/README.md b/frame/timestamp/README.md index 5610caca4da51..de1fb74392225 100644 --- a/frame/timestamp/README.md +++ b/frame/timestamp/README.md @@ -2,9 +2,9 @@ The Timestamp module provides functionality to get and set the on-chain time. -- [`timestamp::Trait`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/trait.Trait.html) -- [`Call`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/enum.Call.html) -- [`Module`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/struct.Module.html) +- [`timestamp::Trait`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/enum.Call.html) +- [`Module`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/struct.Module.html) ## Overview @@ -29,7 +29,7 @@ because of cumulative calculation errors and hence should be avoided. * `get` - Gets the current time for the current block. If this function is called prior to setting the timestamp, it will return the timestamp of the previous block. -### Trait Getters +### Config Getters * `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. @@ -48,10 +48,10 @@ trait from the timestamp trait. use frame_support::{decl_module, dispatch}; use frame_system::ensure_signed; -pub trait Trait: timestamp::Trait {} +pub trait Config: timestamp::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn get_time(origin) -> dispatch::DispatchResult { let _sender = ensure_signed(origin)?; @@ -69,6 +69,6 @@ the Timestamp module for session management. ## Related Modules -* [Session](https://docs.rs/pallet-timestamppallet-session/latest/pallet_session/) +* [Session](https://docs.rs/pallet-session/latest/pallet_session/) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/timestamp/src/benchmarking.rs b/frame/timestamp/src/benchmarking.rs index a0700179a9336..ad249cbae69f2 100644 --- a/frame/timestamp/src/benchmarking.rs +++ b/frame/timestamp/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,12 +30,10 @@ use crate::Module as Timestamp; const MAX_TIME: u32 = 100; benchmarks! { - _ { } - set { let t = MAX_TIME; // Ignore write to `DidUpdate` since it transient. 
- let did_update_key = crate::DidUpdate::hashed_key().to_vec(); + let did_update_key = crate::DidUpdate::::hashed_key().to_vec(); frame_benchmarking::benchmarking::add_to_whitelist(TrackedStorageKey { key: did_update_key, has_been_read: false, @@ -49,13 +47,13 @@ benchmarks! { on_finalize { let t = MAX_TIME; Timestamp::::set(RawOrigin::None.into(), t.into())?; - ensure!(DidUpdate::exists(), "Time was not set."); + ensure!(DidUpdate::::exists(), "Time was not set."); // Ignore read/write to `DidUpdate` since it is transient. - let did_update_key = crate::DidUpdate::hashed_key().to_vec(); + let did_update_key = crate::DidUpdate::::hashed_key().to_vec(); frame_benchmarking::benchmarking::add_to_whitelist(did_update_key.into()); }: { Timestamp::::on_finalize(t.into()); } verify { - ensure!(!DidUpdate::exists(), "Time was not removed."); + ensure!(!DidUpdate::::exists(), "Time was not removed."); } } diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index d546a34017d0a..86ca0c11a70c8 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,23 +15,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Timestamp Module +//! # Timestamp Pallet //! -//! The Timestamp module provides functionality to get and set the on-chain time. +//! The Timestamp pallet provides functionality to get and set the on-chain time. //! -//! - [`timestamp::Trait`](./trait.Trait.html) +//! - [`timestamp::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Pallet`](./struct.Pallet.html) //! //! ## Overview //! -//! The Timestamp module allows the validators to set and validate a timestamp with each block. +//! The Timestamp pallet allows the validators to set and validate a timestamp with each block. //! //! It uses inherents for timestamp data, which is provided by the block author and validated/verified //! by other validators. The timestamp can be set only once per block and must be set each block. //! There could be a constraint on how much time must pass before setting the new timestamp. //! -//! **NOTE:** The Timestamp module is the recommended way to query the on-chain time instead of using +//! **NOTE:** The Timestamp pallet is the recommended way to query the on-chain time instead of using //! an approach based on block numbers. The block number based time measurement can cause issues //! because of cumulative calculation errors and hence should be avoided. //! @@ -46,17 +46,17 @@ //! * `get` - Gets the current time for the current block. If this function is called prior to //! setting the timestamp, it will return the timestamp of the previous block. //! -//! ### Trait Getters +//! ### Config Getters //! //! * `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. //! //! ## Usage //! -//! The following example shows how to use the Timestamp module in your custom module to query the current timestamp. +//! The following example shows how to use the Timestamp pallet in your custom pallet to query the current timestamp. //! //! ### Prerequisites //! -//! Import the Timestamp module into your custom module and derive the module configuration +//! 
Import the Timestamp pallet into your custom pallet and derive the pallet configuration //! trait from the timestamp trait. //! //! ### Get current timestamp @@ -66,10 +66,10 @@ //! # use pallet_timestamp as timestamp; //! use frame_system::ensure_signed; //! -//! pub trait Trait: timestamp::Trait {} +//! pub trait Config: timestamp::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn get_time(origin) -> dispatch::DispatchResult { //! let _sender = ensure_signed(origin)?; @@ -83,10 +83,10 @@ //! //! ### Example from the FRAME //! -//! The [Session module](https://github.com/paritytech/substrate/blob/master/frame/session/src/lib.rs) uses -//! the Timestamp module for session management. +//! The [Session pallet](https://github.com/paritytech/substrate/blob/master/frame/session/src/lib.rs) uses +//! the Timestamp pallet for session management. //! -//! ## Related Modules +//! ## Related Pallets //! //! * [Session](../pallet_session/index.html) @@ -96,54 +96,83 @@ mod benchmarking; pub mod weights; use sp_std::{result, cmp}; -use sp_inherents::{ProvideInherent, InherentData, InherentIdentifier}; +use sp_inherents::InherentData; #[cfg(feature = "std")] use frame_support::debug; -use frame_support::{ - Parameter, decl_storage, decl_module, - traits::{Time, UnixTime, Get}, - weights::{DispatchClass, Weight}, -}; +use frame_support::traits::{Time, UnixTime}; use sp_runtime::{ RuntimeString, traits::{ AtLeast32Bit, Zero, SaturatedConversion, Scale, } }; -use frame_system::ensure_none; use sp_timestamp::{ InherentError, INHERENT_IDENTIFIER, InherentType, OnTimestampSet, }; pub use weights::WeightInfo; -/// The module configuration trait -pub trait Trait: frame_system::Trait { - /// Type used for expressing timestamp. - type Moment: Parameter + Default + AtLeast32Bit - + Scale + Copy; +pub use pallet::*; - /// Something which can be notified when the timestamp is set. Set this to `()` if not needed. - type OnTimestampSet: OnTimestampSet; +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; - /// The minimum period between blocks. Beware that this is different to the *expected* period - /// that the block production apparatus provides. Your chosen consensus system will generally - /// work with this to determine a sensible block time. e.g. For Aura, it will be double this - /// period on default settings. - type MinimumPeriod: Get; + /// The pallet configuration trait + #[pallet::config] + pub trait Config: frame_system::Config { + /// Type used for expressing timestamp. + type Moment: Parameter + Default + AtLeast32Bit + + Scale + Copy; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} + /// Something which can be notified when the timestamp is set. Set this to `()` if not needed. + type OnTimestampSet: OnTimestampSet; -decl_module! { - pub struct Module for enum Call where origin: T::Origin { /// The minimum period between blocks. Beware that this is different to the *expected* period /// that the block production apparatus provides. Your chosen consensus system will generally /// work with this to determine a sensible block time. e.g. For Aura, it will be double this /// period on default settings. 
- const MinimumPeriod: T::Moment = T::MinimumPeriod::get(); + #[pallet::constant] + type MinimumPeriod: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); + + /// Current time for the current block. + #[pallet::storage] + #[pallet::getter(fn now)] + pub type Now = StorageValue<_, T::Moment, ValueQuery>; + + /// Did the timestamp get updated in this block? + #[pallet::storage] + pub(super) type DidUpdate = StorageValue<_, bool, ValueQuery>; + + #[pallet::hooks] + impl Hooks> for Pallet { + /// dummy `on_initialize` to return the weight used in `on_finalize`. + fn on_initialize(_n: BlockNumberFor) -> Weight { + // weight of `on_finalize` + T::WeightInfo::on_finalize() + } + /// # + /// - `O(1)` + /// - 1 storage deletion (codec `O(1)`). + /// # + fn on_finalize(_n: BlockNumberFor) { + assert!(DidUpdate::::take(), "Timestamp must be updated once in the block"); + } + } + + #[pallet::call] + impl Pallet { /// Set the current time. /// /// This call should be invoked exactly once per block. It will panic at the finalization @@ -159,51 +188,65 @@ decl_module! { /// - 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in `on_finalize`) /// - 1 event handler `on_timestamp_set`. Must be `O(1)`. /// # - #[weight = ( + #[pallet::weight(( T::WeightInfo::set(), DispatchClass::Mandatory - )] - fn set(origin, #[compact] now: T::Moment) { + ))] + pub(super) fn set(origin: OriginFor, #[pallet::compact] now: T::Moment) -> DispatchResultWithPostInfo { ensure_none(origin)?; - assert!(!::DidUpdate::exists(), "Timestamp must be updated only once in the block"); + assert!(!DidUpdate::::exists(), "Timestamp must be updated only once in the block"); let prev = Self::now(); assert!( prev.is_zero() || now >= prev + T::MinimumPeriod::get(), "Timestamp must increment by at least between sequential blocks" ); - ::Now::put(now); - ::DidUpdate::put(true); + Now::::put(now); + DidUpdate::::put(true); >::on_timestamp_set(now); - } - /// dummy `on_initialize` to return the weight used in `on_finalize`. - fn on_initialize() -> Weight { - // weight of `on_finalize` - T::WeightInfo::on_finalize() + Ok(().into()) } + } - /// # - /// - `O(1)` - /// - 1 storage deletion (codec `O(1)`). - /// # - fn on_finalize() { - assert!(::DidUpdate::take(), "Timestamp must be updated once in the block"); + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + let data: T::Moment = extract_inherent_data(data) + .expect("Gets and decodes timestamp inherent data") + .saturated_into(); + + let next_time = cmp::max(data, Self::now() + T::MinimumPeriod::get()); + Some(Call::set(next_time.into())) } - } -} -decl_storage! { - trait Store for Module as Timestamp { - /// Current time for the current block. - pub Now get(fn now): T::Moment; + fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { + const MAX_TIMESTAMP_DRIFT_MILLIS: u64 = 30 * 1000; - /// Did the timestamp get updated in this block? 
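// Illustrative aside (not part of the diff): the timestamp rules enforced by `set`
// and `check_inherent` above, restated as plain functions over millisecond values.
// `MAX_DRIFT_MILLIS` mirrors the 30-second constant in the hunk; everything else
// here is a hypothetical re-statement, not the pallet API.

const MAX_DRIFT_MILLIS: u64 = 30 * 1000;

/// `set` requires strict progress: either this is the first timestamp, or it moved
/// forward by at least the minimum period.
fn set_is_valid(prev: u64, proposed: u64, minimum_period: u64) -> bool {
    prev == 0 || proposed >= prev + minimum_period
}

/// `check_inherent` additionally bounds the proposal against the verifier's own
/// clock (`local_now`): not too far in the future, and not earlier than the minimum.
fn inherent_is_acceptable(
    proposed: u64,
    local_now: u64,
    on_chain_now: u64,
    minimum_period: u64,
) -> Result<(), &'static str> {
    let minimum = on_chain_now + minimum_period;
    if proposed > local_now + MAX_DRIFT_MILLIS {
        Err("timestamp too far in the future to accept")
    } else if proposed < minimum {
        Err("only valid at a later timestamp")
    } else {
        Ok(())
    }
}

fn main() {
    assert!(set_is_valid(0, 1_000, 500));      // genesis: any first value is fine
    assert!(!set_is_valid(1_000, 1_200, 500)); // moved forward, but not by enough
    assert!(set_is_valid(1_000, 1_500, 500));
    assert!(inherent_is_acceptable(61_000, 60_000, 60_000, 500).is_ok());
    assert!(inherent_is_acceptable(120_000, 60_000, 60_000, 500).is_err()); // drift
}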
- DidUpdate: bool; + let t: u64 = match call { + Call::set(ref t) => t.clone().saturated_into::(), + _ => return Ok(()), + }; + + let data = extract_inherent_data(data).map_err(|e| InherentError::Other(e))?; + + let minimum = (Self::now() + T::MinimumPeriod::get()).saturated_into::(); + if t > data + MAX_TIMESTAMP_DRIFT_MILLIS { + Err(InherentError::Other("Timestamp too far in future to accept".into())) + } else if t < minimum { + Err(InherentError::ValidAtTimestamp(minimum)) + } else { + Ok(()) + } + } } } -impl Module { +impl Pallet { /// Get the current time for the current block. /// /// NOTE: if this function is called prior to setting the timestamp, @@ -215,7 +258,7 @@ impl Module { /// Set the timestamp to something in particular. Only used for tests. #[cfg(feature = "std")] pub fn set_timestamp(now: T::Moment) { - ::Now::put(now); + Now::::put(now); } } @@ -225,42 +268,7 @@ fn extract_inherent_data(data: &InherentData) -> Result ProvideInherent for Module { - type Call = Call; - type Error = InherentError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(data: &InherentData) -> Option { - let data: T::Moment = extract_inherent_data(data) - .expect("Gets and decodes timestamp inherent data") - .saturated_into(); - - let next_time = cmp::max(data, Self::now() + T::MinimumPeriod::get()); - Some(Call::set(next_time.into())) - } - - fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { - const MAX_TIMESTAMP_DRIFT_MILLIS: u64 = 30 * 1000; - - let t: u64 = match call { - Call::set(ref t) => t.clone().saturated_into::(), - _ => return Ok(()), - }; - - let data = extract_inherent_data(data).map_err(|e| InherentError::Other(e))?; - - let minimum = (Self::now() + T::MinimumPeriod::get()).saturated_into::(); - if t > data + MAX_TIMESTAMP_DRIFT_MILLIS { - Err(InherentError::Other("Timestamp too far in future to accept".into())) - } else if t < minimum { - Err(InherentError::ValidAtTimestamp(minimum)) - } else { - Ok(()) - } - } -} - -impl Time for Module { +impl Time for Pallet { type Moment = T::Moment; /// Before the first set of now with inherent the value returned is zero. @@ -272,7 +280,7 @@ impl Time for Module { /// Before the timestamp inherent is applied, it returns the time of previous block. /// /// On genesis the time returned is not valid. -impl UnixTime for Module { +impl UnixTime for Pallet { fn now() -> core::time::Duration { // now is duration since unix epoch in millisecond as documented in // `sp_timestamp::InherentDataProvider`. @@ -290,67 +298,71 @@ impl UnixTime for Module { #[cfg(test)] mod tests { + use crate as pallet_timestamp; use super::*; - use frame_support::{impl_outer_origin, assert_ok, parameter_types, weights::Weight}; + use frame_support::{assert_ok, parameter_types}; use sp_io::TestExternalities; use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; pub fn new_test_ext() -> TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); TestExternalities::new(t) } - impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} - } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + } + ); - #[derive(Clone, Eq, PartialEq)] - pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const MinimumPeriod: u64 = 5; } - impl Trait for Test { + impl Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } - type Timestamp = Module; #[test] fn timestamp_works() { diff --git a/frame/timestamp/src/weights.rs b/frame/timestamp/src/weights.rs index 67ce28ba9111f..8cc40faecc932 100644 --- a/frame/timestamp/src/weights.rs +++ b/frame/timestamp/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -50,7 +50,7 @@ pub trait WeightInfo { /// Weights for pallet_timestamp using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn set() -> Weight { (11_650_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) diff --git a/frame/tips/Cargo.toml b/frame/tips/Cargo.toml new file mode 100644 index 0000000000000..92af65ce07650 --- /dev/null +++ b/frame/tips/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "pallet-tips" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet to manage tips" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { version = "1.0.101", optional = true, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +pallet-treasury = { version = "3.0.0", default-features = false, path = "../treasury" } + +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] +sp-io ={ version = "3.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-storage = { version = "3.0.0", path = "../../primitives/storage" } +pallet-balances = { version = "3.0.0", path = "../balances" } + +[features] +default = ["std"] +std = [ + "serde", + "codec/std", + "sp-std/std", + "sp-runtime/std", + "frame-support/std", + "frame-system/std", + "pallet-treasury/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] diff --git a/frame/tips/README.md b/frame/tips/README.md new file mode 100644 index 0000000000000..36148e276edc2 --- /dev/null +++ b/frame/tips/README.md @@ -0,0 +1,33 @@ +# Tipping Module ( pallet-tips ) + +**Note :: This pallet is tightly coupled to pallet-treasury** + +A subsystem to allow for an agile "tipping" process, whereby a reward may be given without first +having a pre-determined stakeholder group come to consensus on how much should be paid. + +A group of `Tippers` is determined through the config `Trait`. After half of these have declared +some amount that they believe a particular reported reason deserves, then a countdown period is +entered where any remaining members can declare their tip amounts also. After the close of the +countdown period, the median of all declared tips is paid to the reported beneficiary, along with +any finders fee, in case of a public (and bonded) original report. + +### Terminology + +- **Tipping:** The process of gathering declarations of amounts to tip and taking the median amount + to be transferred from the treasury to a beneficiary account. +- **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a particular + individual (identified by an account ID) is worthy of a recognition by the treasury. +- **Finder:** The original public reporter of some reason for tipping. 
+- **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, + rather than the main beneficiary. + +## Interface + +### Dispatchable Functions + +- `report_awesome` - Report something worthy of a tip and register for a finders fee. +- `retract_tip` - Retract a previous (finders fee registered) report. +- `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. +- `tip` - Declare or redeclare an amount to tip for a particular reason. +- `close_tip` - Close and pay out a tip. +- `slash_tip` - Remove and slash an already-open tip. \ No newline at end of file diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs new file mode 100644 index 0000000000000..e05afc0b2ab20 --- /dev/null +++ b/frame/tips/src/benchmarking.rs @@ -0,0 +1,213 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Treasury tips benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_system::RawOrigin; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use sp_runtime::{traits::{Saturating}}; + +use crate::Module as TipsMod; + +const SEED: u32 = 0; + +// Create the pre-requisite information needed to create a `report_awesome`. +fn setup_awesome(length: u32) -> (T::AccountId, Vec, T::AccountId) { + let caller = whitelisted_caller(); + let value = T::TipReportDepositBase::get() + + T::DataDepositPerByte::get() * length.into() + + T::Currency::minimum_balance(); + let _ = T::Currency::make_free_balance_be(&caller, value); + let reason = vec![0; length as usize]; + let awesome_person = account("awesome", 0, SEED); + (caller, reason, awesome_person) +} + +// Create the pre-requisite information needed to call `tip_new`. +fn setup_tip(r: u32, t: u32) -> + Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> +{ + let tippers_count = T::Tippers::count(); + + for i in 0 .. t { + let member = account("member", i, SEED); + T::Tippers::add(&member); + ensure!(T::Tippers::contains(&member), "failed to add tipper"); + } + + ensure!(T::Tippers::count() == tippers_count + t as usize, "problem creating tippers"); + let caller = account("member", t - 1, SEED); + let reason = vec![0; r as usize]; + let beneficiary = account("beneficiary", t, SEED); + let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); + Ok((caller, reason, beneficiary, value)) +} + +// Create `t` new tips for the tip proposal with `hash`. +// This function automatically makes the tip able to close. +fn create_tips(t: u32, hash: T::Hash, value: BalanceOf) -> + Result<(), &'static str> +{ + for i in 0 .. 
t { + let caller = account("member", i, SEED); + ensure!(T::Tippers::contains(&caller), "caller is not a tipper"); + TipsMod::::tip(RawOrigin::Signed(caller).into(), hash, value)?; + } + Tips::::mutate(hash, |maybe_tip| { + if let Some(open_tip) = maybe_tip { + open_tip.closes = Some(T::BlockNumber::zero()); + } + }); + Ok(()) +} + +fn setup_pot_account() { + let pot_account = TipsMod::::account_id(); + let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into()); + let _ = T::Currency::make_free_balance_be(&pot_account, value); +} + +const MAX_BYTES: u32 = 16384; +const MAX_TIPPERS: u32 = 100; + +benchmarks! { + report_awesome { + let r in 0 .. MAX_BYTES; + let (caller, reason, awesome_person) = setup_awesome::(r); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), reason, awesome_person) + + retract_tip { + let r = MAX_BYTES; + let (caller, reason, awesome_person) = setup_awesome::(r); + TipsMod::::report_awesome( + RawOrigin::Signed(caller.clone()).into(), + reason.clone(), + awesome_person.clone() + )?; + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), hash) + + tip_new { + let r in 0 .. MAX_BYTES; + let t in 1 .. MAX_TIPPERS; + + let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), reason, beneficiary, value) + + tip { + let t in 1 .. MAX_TIPPERS; + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); + TipsMod::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + create_tips::(t - 1, hash.clone(), value)?; + let caller = account("member", t - 1, SEED); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), hash, value) + + close_tip { + let t in 1 .. MAX_TIPPERS; + + // Make sure pot is funded + setup_pot_account::(); + + // Set up a new tip proposal + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); + TipsMod::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + + // Create a bunch of tips + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + + create_tips::(t, hash.clone(), value)?; + + let caller = account("caller", t, SEED); + // Whitelist caller account from further DB operations. 
+ let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), hash) + + slash_tip { + let t in 1 .. MAX_TIPPERS; + + // Make sure pot is funded + setup_pot_account::(); + + // Set up a new tip proposal + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); + TipsMod::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + }: _(RawOrigin::Root, hash) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_report_awesome::()); + assert_ok!(test_benchmark_retract_tip::()); + assert_ok!(test_benchmark_tip_new::()); + assert_ok!(test_benchmark_tip::()); + assert_ok!(test_benchmark_close_tip::()); + assert_ok!(test_benchmark_slash_tip::()); + }); + } +} diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs new file mode 100644 index 0000000000000..442df89428fcc --- /dev/null +++ b/frame/tips/src/lib.rs @@ -0,0 +1,578 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Tipping Module ( pallet-tips ) +//! +//! > NOTE: This pallet is tightly coupled with pallet-treasury. +//! +//! A subsystem to allow for an agile "tipping" process, whereby a reward may be given without first +//! having a pre-determined stakeholder group come to consensus on how much should be paid. +//! +//! A group of `Tippers` is determined through the config `Config`. After half of these have declared +//! some amount that they believe a particular reported reason deserves, then a countdown period is +//! entered where any remaining members can declare their tip amounts also. After the close of the +//! countdown period, the median of all declared tips is paid to the reported beneficiary, along +//! with any finders fee, in case of a public (and bonded) original report. +//! +//! +//! ### Terminology +//! +//! Tipping protocol: +//! - **Tipping:** The process of gathering declarations of amounts to tip and taking the median +//! amount to be transferred from the treasury to a beneficiary account. +//! - **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a +//! particular individual (identified by an account ID) is worthy of a recognition by the +//! treasury. +//! - **Finder:** The original public reporter of some reason for tipping. +//! - **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, +//! 
rather than the main beneficiary. +//! +//! ## Interface +//! +//! ### Dispatchable Functions +//! +//! Tipping protocol: +//! - `report_awesome` - Report something worthy of a tip and register for a finders fee. +//! - `retract_tip` - Retract a previous (finders fee registered) report. +//! - `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. +//! - `tip` - Declare or redeclare an amount to tip for a particular reason. +//! - `close_tip` - Close and pay out a tip. + +#![cfg_attr(not(feature = "std"), no_std)] + +mod tests; +mod benchmarking; +pub mod weights; + +use sp_std::prelude::*; +use frame_support::{decl_module, decl_storage, decl_event, ensure, decl_error, Parameter}; +use frame_support::traits::{ + Currency, Get, ExistenceRequirement::{KeepAlive}, + ReservableCurrency +}; + +use sp_runtime::{ Percent, RuntimeDebug, traits::{ + Zero, AccountIdConversion, Hash, BadOrigin +}}; +use frame_support::traits::{Contains, ContainsLengthBound, OnUnbalanced, EnsureOrigin}; +use codec::{Encode, Decode}; +use frame_system::{self as system, ensure_signed}; +pub use weights::WeightInfo; + +pub type BalanceOf = pallet_treasury::BalanceOf; +pub type NegativeImbalanceOf = pallet_treasury::NegativeImbalanceOf; + +pub trait Config: frame_system::Config + pallet_treasury::Config { + /// Maximum acceptable reason length. + type MaximumReasonLength: Get; + + /// The amount held on deposit per byte within the tip report reason or bounty description. + type DataDepositPerByte: Get>; + + /// Origin from which tippers must come. + /// + /// `ContainsLengthBound::max_len` must be cost free (i.e. no storage read or heavy operation). + type Tippers: Contains + ContainsLengthBound; + + /// The period for which a tip remains open after is has achieved threshold tippers. + type TipCountdown: Get; + + /// The percent of the final tip which goes to the original reporter of the tip. + type TipFindersFee: Get; + + /// The amount held on deposit for placing a tip report. + type TipReportDepositBase: Get>; + + /// The overarching event type. + type Event: From> + Into<::Event>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; +} + +/// An open tipping "motion". Retains all details of a tip including information on the finder +/// and the members who have voted. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] +pub struct OpenTip< + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, +> { + /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be + /// sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip. + finder: AccountId, + /// The amount held on deposit for this tip. + deposit: Balance, + /// The block number at which this tip will close if `Some`. If `None`, then no closing is + /// scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. + tips: Vec<(AccountId, Balance)>, + /// Whether this tip should result in the finder taking a fee. + finders_fee: bool, +} + +// Note :: For backward compatability reasons, +// pallet-tips uses Treasury for storage. +// This is temporary solution, soon will get replaced with +// Own storage identifier. +decl_storage! { + trait Store for Module as Treasury { + + /// TipsMap that are not yet completed. Keyed by the hash of `(reason, who)` from the value. 
+ /// This has the insecure enumerable hash function since the key itself is already + /// guaranteed to be a secure hash. + pub Tips get(fn tips): + map hasher(twox_64_concat) T::Hash + => Option, T::BlockNumber, T::Hash>>; + + /// Simple preimage lookup from the reason's hash to the original data. Again, has an + /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. + pub Reasons get(fn reasons): map hasher(identity) T::Hash => Option>; + + } +} + +decl_event!( + pub enum Event + where + Balance = BalanceOf, + ::AccountId, + ::Hash, + { + /// A new tip suggestion has been opened. \[tip_hash\] + NewTip(Hash), + /// A tip suggestion has reached threshold and is closing. \[tip_hash\] + TipClosing(Hash), + /// A tip suggestion has been closed. \[tip_hash, who, payout\] + TipClosed(Hash, AccountId, Balance), + /// A tip suggestion has been retracted. \[tip_hash\] + TipRetracted(Hash), + /// A tip suggestion has been slashed. \[tip_hash, finder, deposit\] + TipSlashed(Hash, AccountId, Balance), + } +); + +decl_error! { + /// Error for the tips module. + pub enum Error for Module { + /// The reason given is just too big. + ReasonTooBig, + /// The tip was already found/started. + AlreadyKnown, + /// The tip hash is unknown. + UnknownTip, + /// The account attempting to retract the tip is not the finder of the tip. + NotFinder, + /// The tip cannot be claimed/closed because there are not enough tippers yet. + StillOpen, + /// The tip cannot be claimed/closed because it's still in the countdown period. + Premature, + } +} + +decl_module! { + pub struct Module + for enum Call + where origin: T::Origin + { + + /// The period for which a tip remains open after is has achieved threshold tippers. + const TipCountdown: T::BlockNumber = T::TipCountdown::get(); + + /// The amount of the final tip which goes to the original reporter of the tip. + const TipFindersFee: Percent = T::TipFindersFee::get(); + + /// The amount held on deposit for placing a tip report. + const TipReportDepositBase: BalanceOf = T::TipReportDepositBase::get(); + + /// The amount held on deposit per byte within the tip report reason. + const DataDepositPerByte: BalanceOf = T::DataDepositPerByte::get(); + + /// Maximum acceptable reason length. + const MaximumReasonLength: u32 = T::MaximumReasonLength::get(); + + type Error = Error; + + fn deposit_event() = default; + + /// Report something `reason` that deserves a tip and claim any eventual the finder's fee. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as + /// `DataDepositPerByte` for each byte in `reason`. + /// + /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be + /// a UTF-8-encoded URL. + /// - `who`: The account which should be credited for the tip. + /// + /// Emits `NewTip` if successful. + /// + /// # + /// - Complexity: `O(R)` where `R` length of `reason`. 
+ /// - encoding and hashing of 'reason' + /// - DbReads: `Reasons`, `Tips` + /// - DbWrites: `Reasons`, `Tips` + /// # + #[weight = ::WeightInfo::report_awesome(reason.len() as u32)] + fn report_awesome(origin, reason: Vec, who: T::AccountId) { + let finder = ensure_signed(origin)?; + + ensure!(reason.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); + + let reason_hash = T::Hashing::hash(&reason[..]); + ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); + let hash = T::Hashing::hash_of(&(&reason_hash, &who)); + ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); + + let deposit = T::TipReportDepositBase::get() + + T::DataDepositPerByte::get() * (reason.len() as u32).into(); + T::Currency::reserve(&finder, deposit)?; + + Reasons::::insert(&reason_hash, &reason); + let tip = OpenTip { + reason: reason_hash, + who, + finder, + deposit, + closes: None, + tips: vec![], + finders_fee: true + }; + Tips::::insert(&hash, tip); + Self::deposit_event(RawEvent::NewTip(hash)); + } + + /// Retract a prior tip-report from `report_awesome`, and cancel the process of tipping. + /// + /// If successful, the original deposit will be unreserved. + /// + /// The dispatch origin for this call must be _Signed_ and the tip identified by `hash` + /// must have been reported by the signing account through `report_awesome` (and not + /// through `tip_new`). + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed + /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. + /// + /// Emits `TipRetracted` if successful. + /// + /// # + /// - Complexity: `O(1)` + /// - Depends on the length of `T::Hash` which is fixed. + /// - DbReads: `Tips`, `origin account` + /// - DbWrites: `Reasons`, `Tips`, `origin account` + /// # + #[weight = ::WeightInfo::retract_tip()] + fn retract_tip(origin, hash: T::Hash) { + let who = ensure_signed(origin)?; + let tip = Tips::::get(&hash).ok_or(Error::::UnknownTip)?; + ensure!(tip.finder == who, Error::::NotFinder); + + Reasons::::remove(&tip.reason); + Tips::::remove(&hash); + if !tip.deposit.is_zero() { + let _ = T::Currency::unreserve(&who, tip.deposit); + } + Self::deposit_event(RawEvent::TipRetracted(hash)); + } + + /// Give a tip for something new; no finder's fee will be taken. + /// + /// The dispatch origin for this call must be _Signed_ and the signing account must be a + /// member of the `Tippers` set. + /// + /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be + /// a UTF-8-encoded URL. + /// - `who`: The account which should be credited for the tip. + /// - `tip_value`: The amount of tip that the sender would like to give. The median tip + /// value of active tippers will be given to the `who`. + /// + /// Emits `NewTip` if successful. + /// + /// # + /// - Complexity: `O(R + T)` where `R` length of `reason`, `T` is the number of tippers. + /// - `O(T)`: decoding `Tipper` vec of length `T` + /// `T` is charged as upper bound given by `ContainsLengthBound`. + /// The actual cost depends on the implementation of `T::Tippers`. 
+ /// - `O(R)`: hashing and encoding of reason of length `R` + /// - DbReads: `Tippers`, `Reasons` + /// - DbWrites: `Reasons`, `Tips` + /// # + #[weight = ::WeightInfo::tip_new(reason.len() as u32, T::Tippers::max_len() as u32)] + fn tip_new(origin, reason: Vec, who: T::AccountId, #[compact] tip_value: BalanceOf) { + let tipper = ensure_signed(origin)?; + ensure!(T::Tippers::contains(&tipper), BadOrigin); + let reason_hash = T::Hashing::hash(&reason[..]); + ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); + let hash = T::Hashing::hash_of(&(&reason_hash, &who)); + + Reasons::::insert(&reason_hash, &reason); + Self::deposit_event(RawEvent::NewTip(hash.clone())); + let tips = vec![(tipper.clone(), tip_value)]; + let tip = OpenTip { + reason: reason_hash, + who, + finder: tipper, + deposit: Zero::zero(), + closes: None, + tips, + finders_fee: false, + }; + Tips::::insert(&hash, tip); + } + + /// Declare a tip value for an already-open tip. + /// + /// The dispatch origin for this call must be _Signed_ and the signing account must be a + /// member of the `Tippers` set. + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed + /// as the hash of the tuple of the hash of the original tip `reason` and the beneficiary + /// account ID. + /// - `tip_value`: The amount of tip that the sender would like to give. The median tip + /// value of active tippers will be given to the `who`. + /// + /// Emits `TipClosing` if the threshold of tippers has been reached and the countdown period + /// has started. + /// + /// # + /// - Complexity: `O(T)` where `T` is the number of tippers. + /// decoding `Tipper` vec of length `T`, insert tip and check closing, + /// `T` is charged as upper bound given by `ContainsLengthBound`. + /// The actual cost depends on the implementation of `T::Tippers`. + /// + /// Actually weight could be lower as it depends on how many tips are in `OpenTip` but it + /// is weighted as if almost full i.e of length `T-1`. + /// - DbReads: `Tippers`, `Tips` + /// - DbWrites: `Tips` + /// # + #[weight = ::WeightInfo::tip(T::Tippers::max_len() as u32)] + fn tip(origin, hash: T::Hash, #[compact] tip_value: BalanceOf) { + let tipper = ensure_signed(origin)?; + ensure!(T::Tippers::contains(&tipper), BadOrigin); + + let mut tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; + if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) { + Self::deposit_event(RawEvent::TipClosing(hash.clone())); + } + Tips::::insert(&hash, tip); + } + + /// Close and payout a tip. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// The tip identified by `hash` must have finished its countdown period. + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed + /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. + /// + /// # + /// - Complexity: `O(T)` where `T` is the number of tippers. + /// decoding `Tipper` vec of length `T`. + /// `T` is charged as upper bound given by `ContainsLengthBound`. + /// The actual cost depends on the implementation of `T::Tippers`. 
+ /// - DbReads: `Tips`, `Tippers`, `tip finder` + /// - DbWrites: `Reasons`, `Tips`, `Tippers`, `tip finder` + /// # + #[weight = ::WeightInfo::close_tip(T::Tippers::max_len() as u32)] + fn close_tip(origin, hash: T::Hash) { + ensure_signed(origin)?; + + let tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; + let n = tip.closes.as_ref().ok_or(Error::::StillOpen)?; + ensure!(system::Module::::block_number() >= *n, Error::::Premature); + // closed. + Reasons::::remove(&tip.reason); + Tips::::remove(hash); + Self::payout_tip(hash, tip); + } + + /// Remove and slash an already-open tip. + /// + /// May only be called from `T::RejectOrigin`. + /// + /// As a result, the finder is slashed and the deposits are lost. + /// + /// Emits `TipSlashed` if successful. + /// + /// # + /// `T` is charged as upper bound given by `ContainsLengthBound`. + /// The actual cost depends on the implementation of `T::Tippers`. + /// # + #[weight = ::WeightInfo::slash_tip(T::Tippers::max_len() as u32)] + fn slash_tip(origin, hash: T::Hash) { + T::RejectOrigin::ensure_origin(origin)?; + + let tip = Tips::::take(hash).ok_or(Error::::UnknownTip)?; + + if !tip.deposit.is_zero() { + let imbalance = T::Currency::slash_reserved(&tip.finder, tip.deposit).0; + T::OnSlash::on_unbalanced(imbalance); + } + Reasons::::remove(&tip.reason); + Self::deposit_event(RawEvent::TipSlashed(hash, tip.finder, tip.deposit)); + } + } +} + +impl Module { + // Add public immutables and private mutables. + + /// The account ID of the treasury pot. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn account_id() -> T::AccountId { + T::ModuleId::get().into_account() + } + + /// Given a mutable reference to an `OpenTip`, insert the tip into it and check whether it + /// closes, if so, then deposit the relevant event and set closing accordingly. + /// + /// `O(T)` and one storage access. + fn insert_tip_and_check_closing( + tip: &mut OpenTip, T::BlockNumber, T::Hash>, + tipper: T::AccountId, + tip_value: BalanceOf, + ) -> bool { + match tip.tips.binary_search_by_key(&&tipper, |x| &x.0) { + Ok(pos) => tip.tips[pos] = (tipper, tip_value), + Err(pos) => tip.tips.insert(pos, (tipper, tip_value)), + } + Self::retain_active_tips(&mut tip.tips); + let threshold = (T::Tippers::count() + 1) / 2; + if tip.tips.len() >= threshold && tip.closes.is_none() { + tip.closes = Some(system::Module::::block_number() + T::TipCountdown::get()); + true + } else { + false + } + } + + /// Remove any non-members of `Tippers` from a `tips` vector. `O(T)`. + fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf)>) { + let members = T::Tippers::sorted_members(); + let mut members_iter = members.iter(); + let mut member = members_iter.next(); + tips.retain(|(ref a, _)| loop { + match member { + None => break false, + Some(m) if m > a => break false, + Some(m) => { + member = members_iter.next(); + if m < a { + continue + } else { + break true; + } + } + } + }); + } + + /// Execute the payout of a tip. + /// + /// Up to three balance operations. + /// Plus `O(T)` (`T` is Tippers length). 
+ fn payout_tip(hash: T::Hash, tip: OpenTip, T::BlockNumber, T::Hash>) { + let mut tips = tip.tips; + Self::retain_active_tips(&mut tips); + tips.sort_by_key(|i| i.1); + + let treasury = Self::account_id(); + let max_payout = pallet_treasury::Module::::pot(); + + let mut payout = tips[tips.len() / 2].1.min(max_payout); + if !tip.deposit.is_zero() { + let _ = T::Currency::unreserve(&tip.finder, tip.deposit); + } + + if tip.finders_fee && tip.finder != tip.who { + // pay out the finder's fee. + let finders_fee = T::TipFindersFee::get() * payout; + payout -= finders_fee; + // this should go through given we checked it's at most the free balance, but still + // we only make a best-effort. + let _ = T::Currency::transfer(&treasury, &tip.finder, finders_fee, KeepAlive); + } + + // same as above: best-effort only. + let _ = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); + Self::deposit_event(RawEvent::TipClosed(hash, tip.who, payout)); + } + + pub fn migrate_retract_tip_for_tip_new() { + /// An open tipping "motion". Retains all details of a tip including information on the finder + /// and the members who have voted. + #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] + pub struct OldOpenTip< + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, + > { + /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be + /// sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip and the amount held on deposit. + finder: Option<(AccountId, Balance)>, + /// The block number at which this tip will close if `Some`. If `None`, then no closing is + /// scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. + tips: Vec<(AccountId, Balance)>, + } + + use frame_support::{Twox64Concat, migration::StorageKeyIterator}; + + for (hash, old_tip) in StorageKeyIterator::< + T::Hash, + OldOpenTip, T::BlockNumber, T::Hash>, + Twox64Concat, + >::new(b"Treasury", b"Tips").drain() + { + + let (finder, deposit, finders_fee) = match old_tip.finder { + Some((finder, deposit)) => { + (finder, deposit, true) + }, + None => { + (T::AccountId::default(), Zero::zero(), false) + }, + }; + let new_tip = OpenTip { + reason: old_tip.reason, + who: old_tip.who, + finder, + deposit, + closes: old_tip.closes, + tips: old_tip.tips, + finders_fee + }; + Tips::::insert(hash, new_tip) + } + } +} diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs new file mode 100644 index 0000000000000..413e2dd9437e2 --- /dev/null +++ b/frame/tips/src/tests.rs @@ -0,0 +1,486 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Treasury pallet tests. 
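// Editorial sketch (not part of the patch): the arithmetic that `payout_tip` above
// performs, written out with plain integers. The median of the declared tips is capped
// by the treasury pot and, for a bonded `report_awesome` report, a `TipFindersFee`
// share is carved out for the finder before the beneficiary is paid. The 20% fee used
// in the example mirrors the `TipFindersFee` of the test configuration below; the
// function name and parameters are illustrative only.
fn payout_arithmetic(mut declared_tips: Vec<u64>, pot: u64, finders_fee_percent: u64) -> (u64, u64) {
    declared_tips.sort();
    // Median declared tip, capped by what the treasury pot can actually pay out.
    let mut payout = declared_tips[declared_tips.len() / 2].min(pot);
    // Finder's cut, deducted from the beneficiary's payout.
    let finders_fee = payout * finders_fee_percent / 100;
    payout -= finders_fee;
    (finders_fee, payout)
}
// With three declared tips of 10, a pot of 100 and a 20% fee, the finder receives 2 and
// the beneficiary 8, which is what `report_awesome_and_tip_works` below asserts.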
+ +#![cfg(test)] + +use crate as tips; +use super::*; +use std::cell::RefCell; +use frame_support::{assert_noop, assert_ok, parameter_types, weights::Weight, traits::Contains}; +use sp_runtime::Permill; +use sp_core::H256; +use sp_runtime::{ + Perbill, ModuleId, + testing::Header, + traits::{BlakeTwo256, IdentityLookup, BadOrigin}, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Treasury: pallet_treasury::{Module, Call, Storage, Config, Event}, + TipsModTestInst: tips::{Module, Call, Storage, Event}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} +impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); +} +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Config for Test { + type MaxLocks = (); + type Balance = u64; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} +thread_local! { + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); +} +pub struct TenToFourteen; +impl Contains for TenToFourteen { + fn sorted_members() -> Vec { + TEN_TO_FOURTEEN.with(|v| { + v.borrow().clone() + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn add(new: &u128) { + TEN_TO_FOURTEEN.with(|v| { + let mut members = v.borrow_mut(); + members.push(*new); + members.sort(); + }) + } +} +impl ContainsLengthBound for TenToFourteen { + fn max_len() -> usize { + TEN_TO_FOURTEEN.with(|v| v.borrow().len()) + } + fn min_len() -> usize { 0 } +} +parameter_types! { + pub const ProposalBond: Permill = Permill::from_percent(5); + pub const ProposalBondMinimum: u64 = 1; + pub const SpendPeriod: u64 = 2; + pub const Burn: Permill = Permill::from_percent(50); + pub const DataDepositPerByte: u64 = 1; + pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); + pub const MaximumReasonLength: u32 = 16384; +} +impl pallet_treasury::Config for Test { + type ModuleId = TreasuryModuleId; + type Currency = pallet_balances::Module; + type ApproveOrigin = frame_system::EnsureRoot; + type RejectOrigin = frame_system::EnsureRoot; + type Event = Event; + type OnSlash = (); + type ProposalBond = ProposalBond; + type ProposalBondMinimum = ProposalBondMinimum; + type SpendPeriod = SpendPeriod; + type Burn = Burn; + type BurnDestination = (); // Just gets burned. 
+ type WeightInfo = (); + type SpendFunds = (); +} +parameter_types! { + pub const TipCountdown: u64 = 1; + pub const TipFindersFee: Percent = Percent::from_percent(20); + pub const TipReportDepositBase: u64 = 1; +} +impl Config for Test { + type MaximumReasonLength = MaximumReasonLength; + type Tippers = TenToFourteen; + type TipCountdown = TipCountdown; + type TipFindersFee = TipFindersFee; + type TipReportDepositBase = TipReportDepositBase; + type DataDepositPerByte = DataDepositPerByte; + type Event = Event; + type WeightInfo = (); +} + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig::{ + // Total issuance will be 200 with treasury account initialized at ED. + balances: vec![(0, 100), (1, 98), (2, 1)], + }.assimilate_storage(&mut t).unwrap(); + pallet_treasury::GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + t.into() +} + +fn last_event() -> RawEvent { + System::events().into_iter().map(|r| r.event) + .filter_map(|e| { + if let Event::tips(inner) = e { Some(inner) } else { None } + }) + .last() + .unwrap() +} + +#[test] +fn genesis_config_works() { + new_test_ext().execute_with(|| { + assert_eq!(Treasury::pot(), 0); + assert_eq!(Treasury::proposal_count(), 0); + }); +} + +fn tip_hash() -> H256 { + BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u128)) +} + +#[test] +fn tip_new_cannot_be_used_twice() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + assert_noop!( + TipsModTestInst::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), + Error::::AlreadyKnown + ); + }); +} + +#[test] +fn report_awesome_and_tip_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + + // other reports don't count. 
+ assert_noop!( + TipsModTestInst::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), + Error::::AlreadyKnown + ); + + let h = tip_hash(); + assert_ok!(TipsModTestInst::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(TipsModTestInst::tip(Origin::signed(9), h.clone(), 10), BadOrigin); + System::set_block_number(2); + assert_ok!(TipsModTestInst::close_tip(Origin::signed(100), h.into())); + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 102); + assert_eq!(Balances::free_balance(3), 8); + }); +} + +#[test] +fn report_awesome_from_beneficiary_and_tip_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u128)); + assert_ok!(TipsModTestInst::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); + System::set_block_number(2); + assert_ok!(TipsModTestInst::close_tip(Origin::signed(100), h.into())); + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 110); + }); +} + +#[test] +fn close_tip_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(TipsModTestInst::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + + let h = tip_hash(); + + assert_eq!(last_event(), RawEvent::NewTip(h)); + + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + + assert_noop!(TipsModTestInst::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); + + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); + + assert_eq!(last_event(), RawEvent::TipClosing(h)); + + assert_noop!(TipsModTestInst::close_tip(Origin::signed(0), h.into()), Error::::Premature); + + System::set_block_number(2); + assert_noop!(TipsModTestInst::close_tip(Origin::none(), h.into()), BadOrigin); + assert_ok!(TipsModTestInst::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + + assert_eq!(last_event(), RawEvent::TipClosed(h, 3, 10)); + + assert_noop!(TipsModTestInst::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); + }); +} + +#[test] +fn slash_tip_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 100); + + assert_ok!(TipsModTestInst::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + + let h = tip_hash(); + assert_eq!(last_event(), RawEvent::NewTip(h)); + + // can't remove from any origin + assert_noop!( + TipsModTestInst::slash_tip(Origin::signed(0), h.clone()), + BadOrigin, + ); + + // can remove from root. 
+ assert_ok!(TipsModTestInst::slash_tip(Origin::root(), h.clone())); + assert_eq!(last_event(), RawEvent::TipSlashed(h, 0, 12)); + + // tipper slashed + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 88); + }); +} + +#[test] +fn retract_tip_works() { + new_test_ext().execute_with(|| { + // with report awesome + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + let h = tip_hash(); + assert_ok!(TipsModTestInst::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(TipsModTestInst::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); + assert_ok!(TipsModTestInst::retract_tip(Origin::signed(0), h.clone())); + System::set_block_number(2); + assert_noop!(TipsModTestInst::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); + + // with tip new + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + let h = tip_hash(); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(TipsModTestInst::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); + assert_ok!(TipsModTestInst::retract_tip(Origin::signed(10), h.clone())); + System::set_block_number(2); + assert_noop!(TipsModTestInst::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); + }); +} + +#[test] +fn tip_median_calculation_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); + let h = tip_hash(); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 1000000)); + System::set_block_number(2); + assert_ok!(TipsModTestInst::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + }); +} + +#[test] +fn tip_changing_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); + let h = tip_hash(); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10000)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10000)); + assert_ok!(TipsModTestInst::tip(Origin::signed(13), h.clone(), 0)); + assert_ok!(TipsModTestInst::tip(Origin::signed(14), h.clone(), 0)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 1000)); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 100)); + assert_ok!(TipsModTestInst::tip(Origin::signed(10), h.clone(), 10)); + System::set_block_number(2); + assert_ok!(TipsModTestInst::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + }); +} + +#[test] +fn test_last_reward_migration() { + use sp_storage::Storage; + + let mut s = Storage::default(); + + #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] + pub struct OldOpenTip< + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, + > { + /// The hash of the reason for the tip. 
The reason should be a human-readable UTF-8 encoded string. A URL would be + /// sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip and the amount held on deposit. + finder: Option<(AccountId, Balance)>, + /// The block number at which this tip will close if `Some`. If `None`, then no closing is + /// scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. + tips: Vec<(AccountId, Balance)>, + } + + let reason1 = BlakeTwo256::hash(b"reason1"); + let hash1 = BlakeTwo256::hash_of(&(reason1, 10u64)); + + let old_tip_finder = OldOpenTip:: { + reason: reason1, + who: 10, + finder: Some((20, 30)), + closes: Some(13), + tips: vec![(40, 50), (60, 70)] + }; + + let reason2 = BlakeTwo256::hash(b"reason2"); + let hash2 = BlakeTwo256::hash_of(&(reason2, 20u64)); + + let old_tip_no_finder = OldOpenTip:: { + reason: reason2, + who: 20, + finder: None, + closes: Some(13), + tips: vec![(40, 50), (60, 70)] + }; + + let data = vec![ + ( + Tips::::hashed_key_for(hash1), + old_tip_finder.encode().to_vec() + ), + ( + Tips::::hashed_key_for(hash2), + old_tip_no_finder.encode().to_vec() + ), + ]; + + s.top = data.into_iter().collect(); + + sp_io::TestExternalities::new(s).execute_with(|| { + + TipsModTestInst::migrate_retract_tip_for_tip_new(); + + // Test w/ finder + assert_eq!( + Tips::::get(hash1), + Some(OpenTip { + reason: reason1, + who: 10, + finder: 20, + deposit: 30, + closes: Some(13), + tips: vec![(40, 50), (60, 70)], + finders_fee: true, + }) + ); + + // Test w/o finder + assert_eq!( + Tips::::get(hash2), + Some(OpenTip { + reason: reason2, + who: 20, + finder: Default::default(), + deposit: 0, + closes: Some(13), + tips: vec![(40, 50), (60, 70)], + finders_fee: false, + }) + ); + }); +} + +#[test] +fn genesis_funding_works() { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let initial_funding = 100; + pallet_balances::GenesisConfig::{ + // Total issuance will be 200 with treasury account initialized with 100. + balances: vec![(0, 100), (Treasury::account_id(), initial_funding)], + }.assimilate_storage(&mut t).unwrap(); + pallet_treasury::GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + let mut t: sp_io::TestExternalities = t.into(); + + t.execute_with(|| { + assert_eq!(Balances::free_balance(Treasury::account_id()), initial_funding); + assert_eq!(Treasury::pot(), initial_funding - Balances::minimum_balance()); + }); +} diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs new file mode 100644 index 0000000000000..94c12f740c043 --- /dev/null +++ b/frame/tips/src/weights.rs @@ -0,0 +1,146 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_tips +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 +//! 
DATE: 2020-12-20, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_tips +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/tips/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_tips. +pub trait WeightInfo { + fn report_awesome(r: u32, ) -> Weight; + fn retract_tip() -> Weight; + fn tip_new(r: u32, t: u32, ) -> Weight; + fn tip(t: u32, ) -> Weight; + fn close_tip(t: u32, ) -> Weight; + fn slash_tip(t: u32, ) -> Weight; +} + +/// Weights for pallet_tips using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn report_awesome(r: u32, ) -> Weight { + (73_795_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn retract_tip() -> Weight { + (61_753_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn tip_new(r: u32, t: u32, ) -> Weight { + (47_731_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((154_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn tip(t: u32, ) -> Weight { + (35_215_000 as Weight) + // Standard Error: 1_000 + .saturating_add((712_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn close_tip(t: u32, ) -> Weight { + (117_027_000 as Weight) + // Standard Error: 1_000 + .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn slash_tip(t: u32, ) -> Weight { + (37_184_000 as Weight) + // Standard Error: 0 + .saturating_add((11_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn report_awesome(r: u32, ) -> Weight { + (73_795_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn retract_tip() -> Weight { + (61_753_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn tip_new(r: u32, t: u32, ) -> Weight { + (47_731_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((154_000 as Weight).saturating_mul(t as Weight)) + 
.saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn tip(t: u32, ) -> Weight { + (35_215_000 as Weight) + // Standard Error: 1_000 + .saturating_add((712_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn close_tip(t: u32, ) -> Weight { + (117_027_000 as Weight) + // Standard Error: 1_000 + .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn slash_tip(t: u32, ) -> Weight { + (37_184_000 as Weight) + // Standard Error: 0 + .saturating_add((11_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 1fa4521900421..7a713ab1cfbd1 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,20 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "./rpc/runtime-api" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } smallvec = "1.4.1" -sp-io = { version = "2.0.0", path = "../../primitives/io", default-features = false } -sp-core = { version = "2.0.0", path = "../../primitives/core", default-features = false } +sp-io = { version = "3.0.0", path = "../../primitives/io", default-features = false } +sp-core = { version = "3.0.0", path = "../../primitives/core", default-features = false } [dev-dependencies] -pallet-balances = { version = "2.0.0", path = "../balances" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +serde_json = "1.0.41" +pallet-balances = { version = "3.0.0", path = "../balances" } +sp-storage = { version = "3.0.0", path = "../../primitives/storage" } [features] default = ["std"] @@ -37,7 +37,6 @@ std = [ "sp-runtime/std", "frame-support/std", "frame-system/std", - "pallet-transaction-payment-rpc-runtime-api/std", "sp-io/std", "sp-core/std", ] diff --git a/frame/transaction-payment/README.md b/frame/transaction-payment/README.md index 10ad9579e92b7..7e95677a1b272 100644 
--- a/frame/transaction-payment/README.md +++ b/frame/transaction-payment/README.md @@ -8,9 +8,9 @@ transaction to be included. This includes: chance to be included by the transaction queue. Additionally, this module allows one to configure: - - The mapping between one unit of weight to one unit of fee via [`Trait::WeightToFee`]. + - The mapping between one unit of weight to one unit of fee via [`Config::WeightToFee`]. - A means of updating the fee for the next block, via defining a multiplier, based on the final state of the chain at the end of the previous block. This can be configured via - [`Trait::FeeMultiplierUpdate`] + [`Config::FeeMultiplierUpdate`] License: Apache-2.0 \ No newline at end of file diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 26f073e60237f..102f91dcc2c08 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment-rpc" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,14 +13,13 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0", path = "../../../primitives/rpc" } -serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", path = "./runtime-api" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +jsonrpc-core = "15.1.0" +jsonrpc-core-client = "15.1.0" +jsonrpc-derive = "15.1.0" +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sp-rpc = { version = "3.0.0", path = "../../../primitives/rpc" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +pallet-transaction-payment-rpc-runtime-api = { version = "3.0.0", path = "./runtime-api" } diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 881c4330eb9a4..fede9f9dd0267 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,23 +13,16 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../../primitives/runtime" } -frame-support = { version = "2.0.0", 
default-features = false, path = "../../../support" } - -[dev-dependencies] -serde_json = "1.0.41" +sp-api = { version = "3.0.0", default-features = false, path = "../../../../primitives/api" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../../../primitives/runtime" } +pallet-transaction-payment = { version = "3.0.0", default-features = false, path = "../../../transaction-payment" } [features] default = ["std"] std = [ - "serde", "sp-api/std", "codec/std", - "sp-std/std", "sp-runtime/std", - "frame-support/std", + "pallet-transaction-payment/std", ] diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index 5575f8f7d0950..bd05aec30333e 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,85 +19,16 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use frame_support::weights::{Weight, DispatchClass}; -use codec::{Encode, Codec, Decode}; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize, Serializer, Deserializer}; -use sp_runtime::traits::{MaybeDisplay, MaybeFromStr}; +use codec::Codec; +use sp_runtime::traits::MaybeDisplay; -/// Information related to a dispatchable's class, weight, and fee that can be queried from the runtime. -#[derive(Eq, PartialEq, Encode, Decode, Default)] -#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] -pub struct RuntimeDispatchInfo { - /// Weight of this dispatch. - pub weight: Weight, - /// Class of this dispatch. - pub class: DispatchClass, - /// The inclusion fee of this dispatch. This does not include a tip or anything else that - /// depends on the signature (i.e. depends on a `SignedExtension`). - #[cfg_attr(feature = "std", serde(bound(serialize = "Balance: std::fmt::Display")))] - #[cfg_attr(feature = "std", serde(serialize_with = "serialize_as_string"))] - #[cfg_attr(feature = "std", serde(bound(deserialize = "Balance: std::str::FromStr")))] - #[cfg_attr(feature = "std", serde(deserialize_with = "deserialize_from_string"))] - pub partial_fee: Balance, -} - -#[cfg(feature = "std")] -fn serialize_as_string(t: &T, serializer: S) -> Result { - serializer.serialize_str(&t.to_string()) -} - -#[cfg(feature = "std")] -fn deserialize_from_string<'de, D: Deserializer<'de>, T: std::str::FromStr>(deserializer: D) -> Result { - let s = String::deserialize(deserializer)?; - s.parse::().map_err(|_| serde::de::Error::custom("Parse from string failed")) -} +pub use pallet_transaction_payment::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; sp_api::decl_runtime_apis! 
{ pub trait TransactionPaymentApi where - Balance: Codec + MaybeDisplay + MaybeFromStr, + Balance: Codec + MaybeDisplay, { fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn should_serialize_and_deserialize_properly_with_string() { - let info = RuntimeDispatchInfo { - weight: 5, - class: DispatchClass::Normal, - partial_fee: 1_000_000_u64, - }; - - let json_str = r#"{"weight":5,"class":"normal","partialFee":"1000000"}"#; - - assert_eq!(serde_json::to_string(&info).unwrap(), json_str); - assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); - - // should not panic - serde_json::to_value(&info).unwrap(); - } - - #[test] - fn should_serialize_and_deserialize_properly_large_value() { - let info = RuntimeDispatchInfo { - weight: 5, - class: DispatchClass::Normal, - partial_fee: u128::max_value(), - }; - - let json_str = r#"{"weight":5,"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; - - assert_eq!(serde_json::to_string(&info).unwrap(), json_str); - assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); - - // should not panic - serde_json::to_value(&info).unwrap(); + fn query_fee_details(uxt: Block::Extrinsic, len: u32) -> FeeDetails; } } diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index 5043f0257fc36..b3e892c165e32 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,14 +18,16 @@ //! RPC interface for the transaction payment module. use std::sync::Arc; +use std::convert::TryInto; use codec::{Codec, Decode}; use sp_blockchain::HeaderBackend; use jsonrpc_core::{Error as RpcError, ErrorCode, Result}; use jsonrpc_derive::rpc; -use sp_runtime::{generic::BlockId, traits::{Block as BlockT, MaybeDisplay, MaybeFromStr}}; +use sp_runtime::{generic::BlockId, traits::{Block as BlockT, MaybeDisplay}}; use sp_api::ProvideRuntimeApi; use sp_core::Bytes; -use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; +use sp_rpc::number::NumberOrHex; +use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; pub use pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi as TransactionPaymentRuntimeApi; pub use self::gen_client::Client as TransactionPaymentClient; @@ -37,6 +39,12 @@ pub trait TransactionPaymentApi { encoded_xt: Bytes, at: Option ) -> Result; + #[rpc(name = "payment_queryFeeDetails")] + fn query_fee_details( + &self, + encoded_xt: Bytes, + at: Option + ) -> Result>; } /// A struct that implements the [`TransactionPaymentApi`]. @@ -48,7 +56,7 @@ pub struct TransactionPayment { impl TransactionPayment { /// Create new `TransactionPayment` with the given reference to the client. 
pub fn new(client: Arc) -> Self { - TransactionPayment { client, _marker: Default::default() } + Self { client, _marker: Default::default() } } } @@ -69,13 +77,15 @@ impl From for i64 { } } -impl TransactionPaymentApi<::Hash, RuntimeDispatchInfo> - for TransactionPayment +impl TransactionPaymentApi< + ::Hash, + RuntimeDispatchInfo, +> for TransactionPayment where Block: BlockT, - C: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, + C: 'static + ProvideRuntimeApi + HeaderBackend, C::Api: TransactionPaymentRuntimeApi, - Balance: Codec + MaybeDisplay + MaybeFromStr, + Balance: Codec + MaybeDisplay + Copy + TryInto, { fn query_info( &self, @@ -101,4 +111,48 @@ where data: Some(format!("{:?}", e).into()), }) } + + fn query_fee_details( + &self, + encoded_xt: Bytes, + at: Option<::Hash>, + ) -> Result> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. + self.client.info().best_hash + )); + + let encoded_len = encoded_xt.len() as u32; + + let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::DecodeError.into()), + message: "Unable to query fee details.".into(), + data: Some(format!("{:?}", e).into()), + })?; + let fee_details = api.query_fee_details(&at, uxt, encoded_len).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::RuntimeError.into()), + message: "Unable to query fee details.".into(), + data: Some(format!("{:?}", e).into()), + })?; + + let try_into_rpc_balance = |value: Balance| value.try_into().map_err(|_| RpcError { + code: ErrorCode::InvalidParams, + message: format!("{} doesn't fit in NumberOrHex representation", value), + data: None, + }); + + Ok(FeeDetails { + inclusion_fee: if let Some(inclusion_fee) = fee_details.inclusion_fee { + Some(InclusionFee { + base_fee: try_into_rpc_balance(inclusion_fee.base_fee)?, + len_fee: try_into_rpc_balance(inclusion_fee.len_fee)?, + adjusted_weight_fee: try_into_rpc_balance(inclusion_fee.adjusted_weight_fee)?, + }) + } else { + None + }, + tip: Default::default(), + }) + } } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index dd310c2639842..709a8f69a487d 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,17 +19,31 @@ //! //! This module provides the basic logic needed to pay the absolute minimum amount needed for a //! transaction to be included. This includes: +//! - _base fee_: This is the minimum amount a user pays for a transaction. It is declared +//! as a base _weight_ in the runtime and converted to a fee using `WeightToFee`. //! - _weight fee_: A fee proportional to amount of weight a transaction consumes. //! - _length fee_: A fee proportional to the encoded length of the transaction. //! - _tip_: An optional tip. Tip increases the priority of the transaction, giving it a higher //! chance to be included by the transaction queue. //! +//! The base fee and adjusted weight and length fees constitute the _inclusion fee_, which is +//! the minimum fee for a transaction to be included in a block. +//! +//! The formula of final fee: +//! ```ignore +//! 
inclusion_fee = base_fee + length_fee + [targeted_fee_adjustment * weight_fee]; +//! final_fee = inclusion_fee + tip; +//! ``` +//! +//! - `targeted_fee_adjustment`: This is a multiplier that can tune the final fee based on +//! the congestion of the network. +//! //! Additionally, this module allows one to configure: -//! - The mapping between one unit of weight to one unit of fee via [`Trait::WeightToFee`]. +//! - The mapping between one unit of weight to one unit of fee via [`Config::WeightToFee`]. //! - A means of updating the fee for the next block, via defining a multiplier, based on the //! final state of the chain at the end of the previous block. This can be configured via -//! [`Trait::FeeMultiplierUpdate`] -//! - How the fees are paid via [`Trait::OnChargeTransaction`]. +//! [`Config::FeeMultiplierUpdate`] +//! - How the fees are paid via [`Config::OnChargeTransaction`]. #![cfg_attr(not(feature = "std"), no_std)] @@ -40,7 +54,7 @@ use frame_support::{ traits::Get, weights::{ Weight, DispatchInfo, PostDispatchInfo, GetDispatchInfo, Pays, WeightToFeePolynomial, - WeightToFeeCoefficient, + WeightToFeeCoefficient, DispatchClass, }, dispatch::DispatchResult, }; @@ -54,16 +68,18 @@ use sp_runtime::{ DispatchInfoOf, PostDispatchInfoOf, }, }; -use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; mod payment; +mod types; + pub use payment::*; +pub use types::{InclusionFee, FeeDetails, RuntimeDispatchInfo}; /// Fee multiplier. pub type Multiplier = FixedU128; type BalanceOf = - <::OnChargeTransaction as OnChargeTransaction>::Balance; + <::OnChargeTransaction as OnChargeTransaction>::Balance; /// A struct to update the weight multiplier per block. It implements `Convert`, meaning that it can convert the previous multiplier to the next one. This should @@ -109,7 +125,7 @@ type BalanceOf = /// Meaning that fees can change by around ~23% per day, given extreme congestion. /// /// More info can be found at: -/// https://w3f-research.readthedocs.io/en/latest/polkadot/Token%20Economics.html +/// pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData<(T, S, V, M)>); /// Something that can convert the current multiplier to the next one. @@ -135,7 +151,7 @@ impl MultiplierUpdate for () { } impl MultiplierUpdate for TargetedFeeAdjustment - where T: frame_system::Trait, S: Get, V: Get, M: Get, + where T: frame_system::Config, S: Get, V: Get, M: Get, { fn min() -> Multiplier { M::get() @@ -149,7 +165,7 @@ impl MultiplierUpdate for TargetedFeeAdjustment } impl Convert for TargetedFeeAdjustment - where T: frame_system::Trait, S: Get, V: Get, M: Get, + where T: frame_system::Config, S: Get, V: Get, M: Get, { fn convert(previous: Multiplier) -> Multiplier { // Defensive only. The multiplier in storage should always be at most positive. 
Nonetheless @@ -158,14 +174,14 @@ impl Convert for TargetedFeeAdjustment::AvailableBlockRatio::get() * - ::MaximumBlockWeight::get(); - let normal_block_weight = - >::block_weight() - .get(frame_support::weights::DispatchClass::Normal) - .min(normal_max_weight); + let normal_max_weight = weights.get(DispatchClass::Normal).max_total + .unwrap_or_else(|| weights.max_block); + let current_block_weight = >::block_weight(); + let normal_block_weight = *current_block_weight + .get(DispatchClass::Normal) + .min(&normal_max_weight); let s = S::get(); let v = V::get(); @@ -213,7 +229,7 @@ impl Default for Releases { } } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// Handler for withdrawing, refunding and depositing the transaction fee. /// Transaction fees are withdrawn before the transaction is executed. /// After the transaction was executed the transaction weight can be @@ -233,7 +249,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as TransactionPayment { + trait Store for Module as TransactionPayment { pub NextFeeMultiplier get(fn next_fee_multiplier): Multiplier = Multiplier::saturating_from_integer(1); StorageVersion build(|_: &GenesisConfig| Releases::V2): Releases; @@ -241,7 +257,7 @@ decl_storage! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// The fee to be paid for making a transaction; the per-byte portion. const TransactionByteFee: BalanceOf = T::TransactionByteFee::get(); @@ -257,13 +273,13 @@ decl_module! { fn integrity_test() { // given weight == u64, we build multipliers from `diff` of two weight values, which can - // at most be MaximumBlockWeight. Make sure that this can fit in a multiplier without + // at most be maximum block weight. Make sure that this can fit in a multiplier without // loss. use sp_std::convert::TryInto; assert!( ::max_value() >= Multiplier::checked_from_integer( - ::MaximumBlockWeight::get().try_into().unwrap() + T::BlockWeights::get().max_block.try_into().unwrap() ).unwrap(), ); @@ -272,9 +288,11 @@ decl_module! { // that if we collapse to minimum, the trend will be positive with a weight value // which is 1% more than the target. let min_value = T::FeeMultiplierUpdate::min(); - let mut target = - T::FeeMultiplierUpdate::target() * - (T::AvailableBlockRatio::get() * T::MaximumBlockWeight::get()); + let mut target = T::FeeMultiplierUpdate::target() * + T::BlockWeights::get().get(DispatchClass::Normal).max_total.expect( + "Setting `max_total` for `Normal` dispatch class is not compatible with \ + `transaction-payment` pallet." + ); // add 1 percent; let addition = target / 100; @@ -285,7 +303,7 @@ decl_module! { target += addition; sp_io::TestExternalities::new_empty().execute_with(|| { - >::set_block_limits(target, 0); + >::set_block_consumed_resources(target, 0); let next = T::FeeMultiplierUpdate::convert(min_value); assert!(next > min_value, "The minimum bound of the multiplier is too low. When \ block saturation is more than target by 1% and multiplier is minimal then \ @@ -296,7 +314,7 @@ decl_module! { } } -impl Module where +impl Module where BalanceOf: FixedPointOperand { /// Query the data that we know about the fee of a given `call`. 
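The module documentation in the hunk above states the fee formula: `inclusion_fee = base_fee + length_fee + [targeted_fee_adjustment * weight_fee]` and `final_fee = inclusion_fee + tip`. A minimal, self-contained sketch of that arithmetic follows. The constants and the parts-per-billion multiplier encoding are assumptions made for illustration; in the pallet these pieces come from `T::WeightToFee`, `T::TransactionByteFee`, the per-class `base_extrinsic` weight, and the stored `NextFeeMultiplier` (a `FixedU128`).

```rust
// Sketch of the documented fee formula, not the pallet's actual implementation.
// Balances and weights are plain u64; the multiplier is expressed in parts per
// billion to stand in for the fixed-point `Multiplier`.

const WEIGHT_TO_FEE: u64 = 1;                   // assumed identity weight -> fee mapping
const TRANSACTION_BYTE_FEE: u64 = 1;            // assumed fee per encoded byte
const BASE_EXTRINSIC_WEIGHT: u64 = 125_000_000; // assumed per-class base weight

/// inclusion_fee = base_fee + len_fee + targeted_fee_adjustment * weight_fee
/// final_fee     = inclusion_fee + tip
fn final_fee(len: u64, weight: u64, multiplier_ppb: u64, tip: u64) -> u64 {
    let base_fee = BASE_EXTRINSIC_WEIGHT.saturating_mul(WEIGHT_TO_FEE);
    let len_fee = len.saturating_mul(TRANSACTION_BYTE_FEE);
    let unadjusted_weight_fee = weight.saturating_mul(WEIGHT_TO_FEE);
    let adjusted_weight_fee =
        unadjusted_weight_fee.saturating_mul(multiplier_ppb) / 1_000_000_000;
    base_fee
        .saturating_add(len_fee)
        .saturating_add(adjusted_weight_fee)
        .saturating_add(tip)
}

fn main() {
    // A 100-byte extrinsic with weight 1_000_000, a multiplier of exactly 1.0, no tip.
    println!("{}", final_fee(100, 1_000_000, 1_000_000_000, 0));
}
```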
@@ -312,8 +330,6 @@ impl Module where len: u32, ) -> RuntimeDispatchInfo> where - T: Send + Sync, - BalanceOf: Send + Sync, T::Call: Dispatchable, { // NOTE: we can actually make it understand `ChargeTransactionPayment`, but would be some @@ -329,27 +345,19 @@ impl Module where RuntimeDispatchInfo { weight, class, partial_fee } } + /// Query the detailed fee of a given `call`. + pub fn query_fee_details( + unchecked_extrinsic: Extrinsic, + len: u32, + ) -> FeeDetails> + where + T::Call: Dispatchable, + { + let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); + Self::compute_fee_details(len, &dispatch_info, 0u32.into()) + } + /// Compute the final fee value for a particular transaction. - /// - /// The final fee is composed of: - /// - `base_fee`: This is the minimum amount a user pays for a transaction. It is declared - /// as a base _weight_ in the runtime and converted to a fee using `WeightToFee`. - /// - `len_fee`: The length fee, the amount paid for the encoded length (in bytes) of the - /// transaction. - /// - `weight_fee`: This amount is computed based on the weight of the transaction. Weight - /// accounts for the execution time of a transaction. - /// - `targeted_fee_adjustment`: This is a multiplier that can tune the final fee based on - /// the congestion of the network. - /// - (Optional) `tip`: If included in the transaction, the tip will be added on top. Only - /// signed transactions can have a tip. - /// - /// The base fee and adjusted weight and length fees constitute the _inclusion fee,_ which is - /// the minimum fee for a transaction to be included in a block. - /// - /// ```ignore - /// inclusion_fee = base_fee + len_fee + [targeted_fee_adjustment * weight_fee]; - /// final_fee = inclusion_fee + tip; - /// ``` pub fn compute_fee( len: u32, info: &DispatchInfoOf, @@ -357,7 +365,18 @@ impl Module where ) -> BalanceOf where T::Call: Dispatchable, { - Self::compute_fee_raw(len, info.weight, tip, info.pays_fee) + Self::compute_fee_details(len, info, tip).final_fee() + } + + /// Compute the fee details for a particular transaction. + pub fn compute_fee_details( + len: u32, + info: &DispatchInfoOf, + tip: BalanceOf, + ) -> FeeDetails> where + T::Call: Dispatchable, + { + Self::compute_fee_raw(len, info.weight, tip, info.pays_fee, info.class) } /// Compute the actual post dispatch fee for a particular transaction. @@ -372,7 +391,25 @@ impl Module where ) -> BalanceOf where T::Call: Dispatchable, { - Self::compute_fee_raw(len, post_info.calc_actual_weight(info), tip, post_info.pays_fee(info)) + Self::compute_actual_fee_details(len, info, post_info, tip).final_fee() + } + + /// Compute the actual post dispatch fee details for a particular transaction. + pub fn compute_actual_fee_details( + len: u32, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + tip: BalanceOf, + ) -> FeeDetails> where + T::Call: Dispatchable, + { + Self::compute_fee_raw( + len, + post_info.calc_actual_weight(info), + tip, + post_info.pays_fee(info), + info.class, + ) } fn compute_fee_raw( @@ -380,7 +417,8 @@ impl Module where weight: Weight, tip: BalanceOf, pays_fee: Pays, - ) -> BalanceOf { + class: DispatchClass, + ) -> FeeDetails> { if pays_fee == Pays::Yes { let len = >::from(len); let per_byte = T::TransactionByteFee::get(); @@ -394,26 +432,33 @@ impl Module where // final adjusted weight fee. 
let adjusted_weight_fee = multiplier.saturating_mul_int(unadjusted_weight_fee); - let base_fee = Self::weight_to_fee(T::ExtrinsicBaseWeight::get()); - base_fee - .saturating_add(fixed_len_fee) - .saturating_add(adjusted_weight_fee) - .saturating_add(tip) + let base_fee = Self::weight_to_fee(T::BlockWeights::get().get(class).base_extrinsic); + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee, + len_fee: fixed_len_fee, + adjusted_weight_fee + }), + tip + } } else { - tip + FeeDetails { + inclusion_fee: None, + tip + } } } fn weight_to_fee(weight: Weight) -> BalanceOf { // cap the weight to the maximum defined in runtime, otherwise it will be the // `Bounded` maximum of its data type, which is not desired. - let capped_weight = weight.min(::MaximumBlockWeight::get()); + let capped_weight = weight.min(T::BlockWeights::get().max_block); T::WeightToFee::calc(&capped_weight) } } impl Convert> for Module where - T: Trait, + T: Config, BalanceOf: FixedPointOperand, { /// Compute the fee for the specified weight. @@ -429,9 +474,9 @@ impl Convert> for Module where /// Require the transactor pay for themselves and maybe include a tip to gain additional priority /// in the queue. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); +pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); -impl ChargeTransactionPayment where +impl ChargeTransactionPayment where T::Call: Dispatchable, BalanceOf: Send + Sync + FixedPointOperand, { @@ -449,14 +494,14 @@ impl ChargeTransactionPayment where ) -> Result< ( BalanceOf, - <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, + <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, ), TransactionValidityError, > { let tip = self.0; let fee = Module::::compute_fee(len as u32, info, tip); - <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee(who, call, info, fee, tip) + <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee(who, call, info, fee, tip) .map(|i| (fee, i)) } @@ -471,14 +516,15 @@ impl ChargeTransactionPayment where /// that the transaction which consumes more resources (either length or weight) with the same /// `fee` ends up having lower priority. 
fn get_priority(len: usize, info: &DispatchInfoOf, final_fee: BalanceOf) -> TransactionPriority { - let weight_saturation = T::MaximumBlockWeight::get() / info.weight.max(1); - let len_saturation = T::MaximumBlockLength::get() as u64 / (len as u64).max(1); + let weight_saturation = T::BlockWeights::get().max_block / info.weight.max(1); + let max_block_length = *T::BlockLength::get().max.get(DispatchClass::Normal); + let len_saturation = max_block_length as u64 / (len as u64).max(1); let coefficient: BalanceOf = weight_saturation.min(len_saturation).saturated_into::>(); final_fee.saturating_mul(coefficient).saturated_into::() } } -impl sp_std::fmt::Debug for ChargeTransactionPayment { +impl sp_std::fmt::Debug for ChargeTransactionPayment { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "ChargeTransactionPayment<{:?}>", self.0) @@ -489,7 +535,7 @@ impl sp_std::fmt::Debug for ChargeTransactionPayment } } -impl SignedExtension for ChargeTransactionPayment where +impl SignedExtension for ChargeTransactionPayment where BalanceOf: Send + Sync + From + FixedPointOperand, T::Call: Dispatchable, { @@ -503,7 +549,7 @@ impl SignedExtension for ChargeTransactionPayment whe // who paid the fee Self::AccountId, // imbalance resulting from withdrawing the fee - <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, + <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, ); fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } @@ -554,9 +600,11 @@ impl SignedExtension for ChargeTransactionPayment whe #[cfg(test)] mod tests { use super::*; + use crate as pallet_transaction_payment; + use frame_system as system; use codec::Encode; use frame_support::{ - impl_outer_dispatch, impl_outer_origin, impl_outer_event, parameter_types, + parameter_types, weights::{ DispatchClass, DispatchInfo, PostDispatchInfo, GetDispatchInfo, Weight, WeightToFeePolynomial, WeightToFeeCoefficients, WeightToFeeCoefficient, @@ -564,7 +612,6 @@ mod tests { traits::Currency, }; use pallet_balances::Call as BalancesCall; - use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; use sp_core::H256; use sp_runtime::{ testing::{Header, TestXt}, @@ -574,49 +621,54 @@ mod tests { use std::cell::RefCell; use smallvec::smallvec; - const CALL: &::Call = - &Call::Balances(BalancesCall::transfer(2, 69)); + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; - impl_outer_dispatch! { - pub enum Call for Runtime where origin: Origin { - pallet_balances::Balances, - frame_system::System, - } - } - - impl_outer_event! { - pub enum Event for Runtime { - system, - pallet_balances, + frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Module, Storage}, } - } - - #[derive(Clone, PartialEq, Eq, Debug)] - pub struct Runtime; + ); - use frame_system as system; - impl_outer_origin!{ - pub enum Origin for Runtime {} - } + const CALL: &::Call = + &Call::Balances(BalancesCall::transfer(2, 69)); thread_local! 
{ static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(0); } - pub struct ExtrinsicBaseWeight; - impl Get for ExtrinsicBaseWeight { - fn get() -> u64 { EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()) } + pub struct BlockWeights; + impl Get for BlockWeights { + fn get() -> frame_system::limits::BlockWeights { + frame_system::limits::BlockWeights::builder() + .base_block(0) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into(); + }) + .for_class(DispatchClass::non_mandatory(), |weights| { + weights.max_total = 1024.into(); + }) + .build_or_panic() + } } parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub static TransactionByteFee: u64 = 1; + pub static WeightToFee: u64 = 1; } - impl frame_system::Trait for Runtime { + impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -628,26 +680,20 @@ mod tests { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Runtime { + impl pallet_balances::Config for Runtime { type Balance = u64; type Event = Event; type DustRemoval = (); @@ -656,17 +702,7 @@ mod tests { type MaxLocks = (); type WeightInfo = (); } - thread_local! { - static TRANSACTION_BYTE_FEE: RefCell = RefCell::new(1); - static WEIGHT_TO_FEE: RefCell = RefCell::new(1); - } - pub struct TransactionByteFee; - impl Get for TransactionByteFee { - fn get() -> u64 { TRANSACTION_BYTE_FEE.with(|v| *v.borrow()) } - } - - pub struct WeightToFee; impl WeightToFeePolynomial for WeightToFee { type Balance = u64; @@ -680,17 +716,13 @@ mod tests { } } - impl Trait for Runtime { + impl Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = (); } - type Balances = pallet_balances::Module; - type System = frame_system::Module; - type TransactionPayment = Module; - pub struct ExtBuilder { balance_factor: u64, base_weight: u64, @@ -858,7 +890,7 @@ mod tests { // fee will be proportional to what is the actual maximum weight in the runtime. 
assert_eq!( Balances::free_balance(&1), - (10000 - ::MaximumBlockWeight::get()) as u64 + (10000 - ::BlockWeights::get().max_block) as u64 ); }); } @@ -956,7 +988,7 @@ mod tests { partial_fee: 5 * 2 /* base * weight_fee */ + len as u64 /* len * 1 */ - + info.weight.min(MaximumBlockWeight::get()) as u64 * 2 * 3 / 2 /* weight */ + + info.weight.min(BlockWeights::get().max_block) as u64 * 2 * 3 / 2 /* weight */ }, ); @@ -1124,11 +1156,11 @@ mod tests { assert_eq!(Balances::free_balance(2), 0); // Transfer Event assert!(System::events().iter().any(|event| { - event.event == Event::pallet_balances(pallet_balances::RawEvent::Transfer(2, 3, 80)) + event.event == Event::pallet_balances(pallet_balances::Event::Transfer(2, 3, 80)) })); // Killed Event assert!(System::events().iter().any(|event| { - event.event == Event::system(system::RawEvent::KilledAccount(2)) + event.event == Event::system(system::Event::KilledAccount(2)) })); }); } diff --git a/frame/transaction-payment/src/payment.rs b/frame/transaction-payment/src/payment.rs index de39215b575be..f84b19d78c297 100644 --- a/frame/transaction-payment/src/payment.rs +++ b/frame/transaction-payment/src/payment.rs @@ -1,5 +1,5 @@ ///! Traits and default implementation for paying transaction fees. -use crate::Trait; +use crate::Config; use codec::FullCodec; use frame_support::{ traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, WithdrawReasons}, @@ -12,10 +12,10 @@ use sp_runtime::{ use sp_std::{fmt::Debug, marker::PhantomData}; type NegativeImbalanceOf = - ::AccountId>>::NegativeImbalance; + ::AccountId>>::NegativeImbalance; /// Handle withdrawing, refunding and depositing of transaction fees. -pub trait OnChargeTransaction { +pub trait OnChargeTransaction { /// The underlying integer type in which fees are calculated. type Balance: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default; type LiquidityInfo: Default; @@ -55,17 +55,17 @@ pub struct CurrencyAdapter(PhantomData<(C, OU)>); /// Default implementation for a Currency and an OnUnbalanced handler. impl OnChargeTransaction for CurrencyAdapter where - T: Trait, - T::TransactionByteFee: Get<::AccountId>>::Balance>, - C: Currency<::AccountId>, + T: Config, + T::TransactionByteFee: Get<::AccountId>>::Balance>, + C: Currency<::AccountId>, C::PositiveImbalance: - Imbalance<::AccountId>>::Balance, Opposite = C::NegativeImbalance>, + Imbalance<::AccountId>>::Balance, Opposite = C::NegativeImbalance>, C::NegativeImbalance: - Imbalance<::AccountId>>::Balance, Opposite = C::PositiveImbalance>, + Imbalance<::AccountId>>::Balance, Opposite = C::PositiveImbalance>, OU: OnUnbalanced>, { type LiquidityInfo = Option>; - type Balance = ::AccountId>>::Balance; + type Balance = ::AccountId>>::Balance; /// Withdraw the predicted fee from the transaction origin. /// diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs new file mode 100644 index 0000000000000..ab771eb8ba5df --- /dev/null +++ b/frame/transaction-payment/src/types.rs @@ -0,0 +1,155 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types for transaction-payment RPC. + +use sp_std::prelude::*; +use frame_support::weights::{Weight, DispatchClass}; +use codec::{Encode, Decode}; +#[cfg(feature = "std")] +use serde::{Serialize, Deserialize}; +use sp_runtime::traits::{AtLeast32BitUnsigned, Zero}; + +/// The base fee and adjusted weight and length fees constitute the _inclusion fee_. +#[derive(Encode, Decode, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +pub struct InclusionFee { + /// This is the minimum amount a user pays for a transaction. It is declared + /// as a base _weight_ in the runtime and converted to a fee using `WeightToFee`. + pub base_fee: Balance, + /// The length fee, the amount paid for the encoded length (in bytes) of the transaction. + pub len_fee: Balance, + /// - `targeted_fee_adjustment`: This is a multiplier that can tune the final fee based on + /// the congestion of the network. + /// - `weight_fee`: This amount is computed based on the weight of the transaction. Weight + /// accounts for the execution time of a transaction. + /// + /// adjusted_weight_fee = targeted_fee_adjustment * weight_fee + pub adjusted_weight_fee: Balance, +} + +impl InclusionFee { + /// Returns the total of inclusion fee. + /// + /// ```ignore + /// inclusion_fee = base_fee + len_fee + adjusted_weight_fee + /// ``` + pub fn inclusion_fee(&self) -> Balance { + self.base_fee + .saturating_add(self.len_fee) + .saturating_add(self.adjusted_weight_fee) + } +} + +/// The `FeeDetails` is composed of: +/// - (Optional) `inclusion_fee`: Only the `Pays::Yes` transaction can have the inclusion fee. +/// - `tip`: If included in the transaction, the tip will be added on top. Only +/// signed transactions can have a tip. +#[derive(Encode, Decode, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +pub struct FeeDetails { + /// The minimum fee for a transaction to be included in a block. + pub inclusion_fee: Option>, + // Do not serialize and deserialize `tip` as we actually can not pass any tip to the RPC. + #[cfg_attr(feature = "std", serde(skip))] + pub tip: Balance, +} + +impl FeeDetails { + /// Returns the final fee. + /// + /// ```ignore + /// final_fee = inclusion_fee + tip; + /// ``` + pub fn final_fee(&self) -> Balance { + self.inclusion_fee.as_ref().map(|i| i.inclusion_fee()).unwrap_or_else(|| Zero::zero()).saturating_add(self.tip) + } +} + +/// Information related to a dispatchable's class, weight, and fee that can be queried from the runtime. +#[derive(Eq, PartialEq, Encode, Decode, Default)] +#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +#[cfg_attr(feature = "std", serde(bound(serialize = "Balance: std::fmt::Display")))] +#[cfg_attr(feature = "std", serde(bound(deserialize = "Balance: std::str::FromStr")))] +pub struct RuntimeDispatchInfo { + /// Weight of this dispatch. 
+ pub weight: Weight, + /// Class of this dispatch. + pub class: DispatchClass, + /// The inclusion fee of this dispatch. + /// + /// This does not include a tip or anything else that + /// depends on the signature (i.e. depends on a `SignedExtension`). + #[cfg_attr(feature = "std", serde(with = "serde_balance"))] + pub partial_fee: Balance, +} + +#[cfg(feature = "std")] +mod serde_balance { + use serde::{Deserialize, Serializer, Deserializer}; + + pub fn serialize(t: &T, serializer: S) -> Result { + serializer.serialize_str(&t.to_string()) + } + + pub fn deserialize<'de, D: Deserializer<'de>, T: std::str::FromStr>(deserializer: D) -> Result { + let s = String::deserialize(deserializer)?; + s.parse::().map_err(|_| serde::de::Error::custom("Parse from string failed")) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_serialize_and_deserialize_properly_with_string() { + let info = RuntimeDispatchInfo { + weight: 5, + class: DispatchClass::Normal, + partial_fee: 1_000_000_u64, + }; + + let json_str = r#"{"weight":5,"class":"normal","partialFee":"1000000"}"#; + + assert_eq!(serde_json::to_string(&info).unwrap(), json_str); + assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); + + // should not panic + serde_json::to_value(&info).unwrap(); + } + + #[test] + fn should_serialize_and_deserialize_properly_large_value() { + let info = RuntimeDispatchInfo { + weight: 5, + class: DispatchClass::Normal, + partial_fee: u128::max_value(), + }; + + let json_str = r#"{"weight":5,"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; + + assert_eq!(serde_json::to_string(&info).unwrap(), json_str); + assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); + + // should not panic + serde_json::to_value(&info).unwrap(); + } +} diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index fd2d103e9f335..461dc91223946 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-treasury" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,19 +14,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +pallet-balances = { version = "3.0.0", default-features = false, path = "../balances" } +impl-trait-for-tuples = "0.2.1" -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } 
+frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +sp-io ={ version = "3.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-storage = { version = "3.0.0", path = "../../primitives/storage" } [features] default = ["std"] diff --git a/frame/treasury/README.md b/frame/treasury/README.md index 424b8e0eedf99..4b061359fea75 100644 --- a/frame/treasury/README.md +++ b/frame/treasury/README.md @@ -1,118 +1,31 @@ # Treasury Module -The Treasury module provides a "pot" of funds that can be managed by stakeholders in the -system and a structure for making spending proposals from this pot. - -- [`treasury::Trait`](https://docs.rs/pallet-treasury/latest/pallet_treasury/trait.Trait.html) -- [`Call`](https://docs.rs/pallet-treasury/latest/pallet_treasury/enum.Call.html) +The Treasury module provides a "pot" of funds that can be managed by stakeholders in the system and +a structure for making spending proposals from this pot. ## Overview -The Treasury Module itself provides the pot to store funds, and a means for stakeholders to -propose, approve, and deny expenditures. The chain will need to provide a method (e.g. -inflation, fees) for collecting funds. - -By way of example, the Council could vote to fund the Treasury with a portion of the block -reward and use the funds to pay developers. - -### Tipping - -A separate subsystem exists to allow for an agile "tipping" process, whereby a reward may be -given without first having a pre-determined stakeholder group come to consensus on how much -should be paid. - -A group of `Tippers` is determined through the config `Trait`. After half of these have declared -some amount that they believe a particular reported reason deserves, then a countdown period is -entered where any remaining members can declare their tip amounts also. After the close of the -countdown period, the median of all declared tips is paid to the reported beneficiary, along -with any finders fee, in case of a public (and bonded) original report. - -### Bounty - -A Bounty Spending is a reward for a specified body of work - or specified set of objectives - that -needs to be executed for a predefined Treasury amount to be paid out. A curator is assigned after -the bounty is approved and funded by Council, to be delegated -with the responsibility of assigning a payout address once the specified set of objectives is completed. - -After the Council has activated a bounty, it delegates the work that requires expertise to a curator -in exchange of a deposit. Once the curator accepts the bounty, they -get to close the Active bounty. Closing the Active bounty enacts a delayed payout to the payout -address, the curator fee and the return of the curator deposit. The -delay allows for intervention through regular democracy. The Council gets to unassign the curator, -resulting in a new curator election. The Council also gets to cancel -the bounty if deemed necessary before assigning a curator or once the bounty is active or payout -is pending, resulting in the slash of the curator's deposit. +The Treasury Module itself provides the pot to store funds, and a means for stakeholders to propose, +approve, and deny expenditures. 
The chain will need to provide a method (e.g.inflation, fees) for +collecting funds. +By way of example, the Council could vote to fund the Treasury with a portion of the block reward +and use the funds to pay developers. ### Terminology - **Proposal:** A suggestion to allocate funds from the pot to a beneficiary. -- **Beneficiary:** An account who will receive the funds from a proposal iff -the proposal is approved. -- **Deposit:** Funds that a proposer must lock when making a proposal. The -deposit will be returned or slashed if the proposal is approved or rejected -respectively. +- **Beneficiary:** An account who will receive the funds from a proposal if the proposal is + approved. +- **Deposit:** Funds that a proposer must lock when making a proposal. The deposit will be returned + or slashed if the proposal is approved or rejected respectively. - **Pot:** Unspent funds accumulated by the treasury module. -Tipping protocol: -- **Tipping:** The process of gathering declarations of amounts to tip and taking the median - amount to be transferred from the treasury to a beneficiary account. -- **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a - particular individual (identified by an account ID) is worthy of a recognition by the - treasury. -- **Finder:** The original public reporter of some reason for tipping. -- **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, - rather than the main beneficiary. - -Bounty: -- **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion by -the Treasury. -- **Proposer:** An account proposing a bounty spending. -- **Curator:** An account managing the bounty and assigning a payout address receiving the reward -for the completion of work. -- **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on -deposit per byte within the bounty description. -- **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The deposit -is returned when/if the bounty is completed. -- **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is -rewarded. -- **Payout address:** The account to which the total or part of the bounty is assigned to. -- **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before claiming. -- **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. - ## Interface ### Dispatchable Functions General spending/proposal protocol: - `propose_spend` - Make a spending proposal and stake the required deposit. -- `set_pot` - Set the spendable balance of funds. -- `configure` - Configure the module's proposal requirements. - `reject_proposal` - Reject a proposal, slashing the deposit. - `approve_proposal` - Accept the proposal, returning the deposit. - -Tipping protocol: -- `report_awesome` - Report something worthy of a tip and register for a finders fee. -- `retract_tip` - Retract a previous (finders fee registered) report. -- `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. -- `tip` - Declare or redeclare an amount to tip for a particular reason. -- `close_tip` - Close and pay out a tip. - -Bounty protocol: -- `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of -tasks and stake the required deposit. -- `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of work. 
-- `propose_curator` - Assign an account to a bounty as candidate curator. -- `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. -- `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. -- `award_bounty` - Close and pay out the specified amount for the completed work. -- `claim_bounty` - Claim a specific bounty amount from the Payout Address. -- `unassign_curator` - Unassign an accepted curator from a specific earmark. -- `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. - - -## GenesisConfig - -The Treasury module depends on the [`GenesisConfig`](https://docs.rs/pallet-treasury/latest/pallet_treasury/struct.GenesisConfig.html). - -License: Apache-2.0 \ No newline at end of file diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index 2794e6cc43203..9cb214420ca43 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks_instance, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks_instance, account}; use frame_support::traits::OnInitialize; use crate::Module as Treasury; @@ -30,7 +30,7 @@ use crate::Module as Treasury; const SEED: u32 = 0; // Create the pre-requisite information needed to create a treasury `propose_spend`. -fn setup_proposal, I: Instance>(u: u32) -> ( +fn setup_proposal, I: Instance>(u: u32) -> ( T::AccountId, BalanceOf, ::Source, @@ -43,58 +43,8 @@ fn setup_proposal, I: Instance>(u: u32) -> ( (caller, value, beneficiary_lookup) } -// Create the pre-requisite information needed to create a `report_awesome`. -fn setup_awesome, I: Instance>(length: u32) -> (T::AccountId, Vec, T::AccountId) { - let caller = whitelisted_caller(); - let value = T::TipReportDepositBase::get() - + T::DataDepositPerByte::get() * length.into() - + T::Currency::minimum_balance(); - let _ = T::Currency::make_free_balance_be(&caller, value); - let reason = vec![0; length as usize]; - let awesome_person = account("awesome", 0, SEED); - (caller, reason, awesome_person) -} - -// Create the pre-requisite information needed to call `tip_new`. -fn setup_tip, I: Instance>(r: u32, t: u32) -> - Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> -{ - let tippers_count = T::Tippers::count(); - - for i in 0 .. t { - let member = account("member", i, SEED); - T::Tippers::add(&member); - ensure!(T::Tippers::contains(&member), "failed to add tipper"); - } - - ensure!(T::Tippers::count() == tippers_count + t as usize, "problem creating tippers"); - let caller = account("member", t - 1, SEED); - let reason = vec![0; r as usize]; - let beneficiary = account("beneficiary", t, SEED); - let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); - Ok((caller, reason, beneficiary, value)) -} - -// Create `t` new tips for the tip proposal with `hash`. -// This function automatically makes the tip able to close. -fn create_tips, I: Instance>(t: u32, hash: T::Hash, value: BalanceOf) -> - Result<(), &'static str> -{ - for i in 0 .. 
t { - let caller = account("member", i, SEED); - ensure!(T::Tippers::contains(&caller), "caller is not a tipper"); - Treasury::::tip(RawOrigin::Signed(caller).into(), hash, value)?; - } - Tips::::mutate(hash, |maybe_tip| { - if let Some(open_tip) = maybe_tip { - open_tip.closes = Some(T::BlockNumber::zero()); - } - }); - Ok(()) -} - // Create proposals that are approved for use in `on_initialize`. -fn create_approved_proposals, I: Instance>(n: u32) -> Result<(), &'static str> { +fn create_approved_proposals, I: Instance>(n: u32) -> Result<(), &'static str> { for i in 0 .. n { let (caller, value, lookup) = setup_proposal::(i); Treasury::::propose_spend( @@ -109,64 +59,14 @@ fn create_approved_proposals, I: Instance>(n: u32) -> Result<(), &'s Ok(()) } -// Create bounties that are approved for use in `on_initialize`. -fn create_approved_bounties, I: Instance>(n: u32) -> Result<(), &'static str> { - for i in 0 .. n { - let (caller, _curator, _fee, value, reason) = setup_bounty::(i, MAX_BYTES); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - } - ensure!(BountyApprovals::::get().len() == n as usize, "Not all bounty approved"); - Ok(()) -} - -// Create the pre-requisite information needed to create a treasury `propose_bounty`. -fn setup_bounty, I: Instance>(u: u32, d: u32) -> ( - T::AccountId, - T::AccountId, - BalanceOf, - BalanceOf, - Vec, -) { - let caller = account("caller", u, SEED); - let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100u32.into()); - let fee = value / 2u32.into(); - let deposit = T::BountyDepositBase::get() + T::DataDepositPerByte::get() * MAX_BYTES.into(); - let _ = T::Currency::make_free_balance_be(&caller, deposit); - let curator = account("curator", u, SEED); - let _ = T::Currency::make_free_balance_be(&curator, fee / 2u32.into()); - let reason = vec![0; d as usize]; - (caller, curator, fee, value, reason) -} - -fn create_bounty, I: Instance>() -> Result<( - ::Source, - BountyIndex, -), &'static str> { - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - let curator_lookup = T::Lookup::unlookup(curator.clone()); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); - Treasury::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup.clone(), fee)?; - Treasury::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; - Ok((curator_lookup, bounty_id)) -} - -fn setup_pod_account, I: Instance>() { +fn setup_pot_account, I: Instance>() { let pot_account = Treasury::::account_id(); let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into()); let _ = T::Currency::make_free_balance_be(&pot_account, value); } -const MAX_BYTES: u32 = 16384; -const MAX_TIPPERS: u32 = 100; - benchmarks_instance! { - _ { } - + propose_spend { let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); // Whitelist caller account from further DB operations. @@ -194,193 +94,13 @@ benchmarks_instance! { let proposal_id = Treasury::::proposal_count() - 1; }: _(RawOrigin::Root, proposal_id) - report_awesome { - let r in 0 .. MAX_BYTES; - let (caller, reason, awesome_person) = setup_awesome::(r); - // Whitelist caller account from further DB operations. 
- let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, awesome_person) - - retract_tip { - let r = MAX_BYTES; - let (caller, reason, awesome_person) = setup_awesome::(r); - Treasury::::report_awesome( - RawOrigin::Signed(caller.clone()).into(), - reason.clone(), - awesome_person.clone() - )?; - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), hash) - - tip_new { - let r in 0 .. MAX_BYTES; - let t in 1 .. MAX_TIPPERS; - - let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, beneficiary, value) - - tip { - let t in 1 .. MAX_TIPPERS; - let (member, reason, beneficiary, value) = setup_tip::(0, t)?; - let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); - Treasury::::tip_new( - RawOrigin::Signed(member).into(), - reason.clone(), - beneficiary.clone(), - value - )?; - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); - ensure!(Tips::::contains_key(hash), "tip does not exist"); - create_tips::(t - 1, hash.clone(), value)?; - let caller = account("member", t - 1, SEED); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), hash, value) - - close_tip { - let t in 1 .. MAX_TIPPERS; - - // Make sure pot is funded - setup_pod_account::(); - - // Set up a new tip proposal - let (member, reason, beneficiary, value) = setup_tip::(0, t)?; - let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); - Treasury::::tip_new( - RawOrigin::Signed(member).into(), - reason.clone(), - beneficiary.clone(), - value - )?; - - // Create a bunch of tips - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); - ensure!(Tips::::contains_key(hash), "tip does not exist"); - create_tips::(t, hash.clone(), value)?; - - let caller = account("caller", t, SEED); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), hash) - - propose_bounty { - let d in 0 .. 
MAX_BYTES; - - let (caller, curator, fee, value, description) = setup_bounty::(0, d); - }: _(RawOrigin::Signed(caller), value, description) - - approve_bounty { - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - }: _(RawOrigin::Root, bounty_id) - - propose_curator { - setup_pod_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - let curator_lookup = T::Lookup::unlookup(curator.clone()); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); - }: _(RawOrigin::Root, bounty_id, curator_lookup, fee) - - // Worst case when curator is inactive and any sender unassigns the curator. - unassign_curator { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::::get() - 1; - frame_system::Module::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); - let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), bounty_id) - - accept_curator { - setup_pod_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - let curator_lookup = T::Lookup::unlookup(curator.clone()); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); - Treasury::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; - }: _(RawOrigin::Signed(curator), bounty_id) - - award_bounty { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - - let bounty_id = BountyCount::::get() - 1; - let curator = T::Lookup::lookup(curator_lookup)?; - let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); - }: _(RawOrigin::Signed(curator), bounty_id, beneficiary) - - claim_bounty { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - - let bounty_id = BountyCount::::get() - 1; - let curator = T::Lookup::lookup(curator_lookup)?; - - let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); - Treasury::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; - - frame_system::Module::::set_block_number(T::BountyDepositPayoutDelay::get()); - - }: _(RawOrigin::Signed(curator), bounty_id) - - close_bounty_proposed { - setup_pod_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - }: close_bounty(RawOrigin::Root, bounty_id) - - close_bounty_active { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::::get() - 1; - }: close_bounty(RawOrigin::Root, bounty_id) - - extend_bounty_expiry { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - - let bounty_id = 
BountyCount::::get() - 1; - let curator = T::Lookup::lookup(curator_lookup)?; - }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) - on_initialize_proposals { let p in 0 .. 100; - setup_pod_account::(); + setup_pot_account::(); create_approved_proposals::(p)?; }: { Treasury::::on_initialize(T::BlockNumber::zero()); } - - on_initialize_bounties { - let b in 0 .. 100; - setup_pod_account::(); - create_approved_bounties::(b)?; - }: { - Treasury::::on_initialize(T::BlockNumber::zero()); - } } #[cfg(test)] @@ -395,23 +115,7 @@ mod tests { assert_ok!(test_benchmark_propose_spend::()); assert_ok!(test_benchmark_reject_proposal::()); assert_ok!(test_benchmark_approve_proposal::()); - assert_ok!(test_benchmark_report_awesome::()); - assert_ok!(test_benchmark_retract_tip::()); - assert_ok!(test_benchmark_tip_new::()); - assert_ok!(test_benchmark_tip::()); - assert_ok!(test_benchmark_close_tip::()); - assert_ok!(test_benchmark_propose_bounty::()); - assert_ok!(test_benchmark_approve_bounty::()); - assert_ok!(test_benchmark_propose_curator::()); - assert_ok!(test_benchmark_unassign_curator::()); - assert_ok!(test_benchmark_accept_curator::()); - assert_ok!(test_benchmark_award_bounty::()); - assert_ok!(test_benchmark_claim_bounty::()); - assert_ok!(test_benchmark_close_bounty_proposed::()); - assert_ok!(test_benchmark_close_bounty_active::()); - assert_ok!(test_benchmark_extend_bounty_expiry::()); assert_ok!(test_benchmark_on_initialize_proposals::()); - assert_ok!(test_benchmark_on_initialize_bounties::()); }); } } diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index 2ada0660f9ec2..b5e2c7881bb5f 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,10 @@ //! # Treasury Module //! -//! The Treasury module provides a "pot" of funds that can be managed by stakeholders in the -//! system and a structure for making spending proposals from this pot. +//! The Treasury module provides a "pot" of funds that can be managed by stakeholders in the system +//! and a structure for making spending proposals from this pot. //! -//! - [`treasury::Trait`](./trait.Trait.html) +//! - [`treasury::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -32,71 +32,16 @@ //! By way of example, the Council could vote to fund the Treasury with a portion of the block //! reward and use the funds to pay developers. //! -//! ### Tipping -//! -//! A separate subsystem exists to allow for an agile "tipping" process, whereby a reward may be -//! given without first having a pre-determined stakeholder group come to consensus on how much -//! should be paid. -//! -//! A group of `Tippers` is determined through the config `Trait`. After half of these have declared -//! some amount that they believe a particular reported reason deserves, then a countdown period is -//! entered where any remaining members can declare their tip amounts also. After the close of the -//! countdown period, the median of all declared tips is paid to the reported beneficiary, along -//! with any finders fee, in case of a public (and bonded) original report. -//! -//! ### Bounty -//! -//! A Bounty Spending is a reward for a specified body of work - or specified set of objectives - that -//! 
needs to be executed for a predefined Treasury amount to be paid out. A curator is assigned after -//! the bounty is approved and funded by Council, to be delegated -//! with the responsibility of assigning a payout address once the specified set of objectives is completed. -//! -//! After the Council has activated a bounty, it delegates the work that requires expertise to a curator -//! in exchange of a deposit. Once the curator accepts the bounty, they -//! get to close the Active bounty. Closing the Active bounty enacts a delayed payout to the payout -//! address, the curator fee and the return of the curator deposit. The -//! delay allows for intervention through regular democracy. The Council gets to unassign the curator, -//! resulting in a new curator election. The Council also gets to cancel -//! the bounty if deemed necessary before assigning a curator or once the bounty is active or payout -//! is pending, resulting in the slash of the curator's deposit. -//! //! //! ### Terminology //! //! - **Proposal:** A suggestion to allocate funds from the pot to a beneficiary. -//! - **Beneficiary:** An account who will receive the funds from a proposal iff -//! the proposal is approved. -//! - **Deposit:** Funds that a proposer must lock when making a proposal. The -//! deposit will be returned or slashed if the proposal is approved or rejected -//! respectively. +//! - **Beneficiary:** An account who will receive the funds from a proposal iff the proposal is +//! approved. +//! - **Deposit:** Funds that a proposer must lock when making a proposal. The deposit will be +//! returned or slashed if the proposal is approved or rejected respectively. //! - **Pot:** Unspent funds accumulated by the treasury module. //! -//! Tipping protocol: -//! - **Tipping:** The process of gathering declarations of amounts to tip and taking the median -//! amount to be transferred from the treasury to a beneficiary account. -//! - **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a -//! particular individual (identified by an account ID) is worthy of a recognition by the -//! treasury. -//! - **Finder:** The original public reporter of some reason for tipping. -//! - **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, -//! rather than the main beneficiary. -//! -//! Bounty: -//! - **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion by -//! the Treasury. -//! - **Proposer:** An account proposing a bounty spending. -//! - **Curator:** An account managing the bounty and assigning a payout address receiving the reward -//! for the completion of work. -//! - **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on -//! deposit per byte within the bounty description. -//! - **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The deposit -//! is returned when/if the bounty is completed. -//! - **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is -//! rewarded. -//! - **Payout address:** The account to which the total or part of the bounty is assigned to. -//! - **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before claiming. -//! - **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. -//! //! ## Interface //! //! ### Dispatchable Functions @@ -106,62 +51,43 @@ //! 
- `reject_proposal` - Reject a proposal, slashing the deposit. //! - `approve_proposal` - Accept the proposal, returning the deposit. //! -//! Tipping protocol: -//! - `report_awesome` - Report something worthy of a tip and register for a finders fee. -//! - `retract_tip` - Retract a previous (finders fee registered) report. -//! - `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. -//! - `tip` - Declare or redeclare an amount to tip for a particular reason. -//! - `close_tip` - Close and pay out a tip. -//! -//! Bounty protocol: -//! - `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of -//! tasks and stake the required deposit. -//! - `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of work. -//! - `propose_curator` - Assign an account to a bounty as candidate curator. -//! - `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. -//! - `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. -//! - `award_bounty` - Close and pay out the specified amount for the completed work. -//! - `claim_bounty` - Claim a specific bounty amount from the Payout Address. -//! - `unassign_curator` - Unassign an accepted curator from a specific earmark. -//! - `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. -//! -//! //! ## GenesisConfig //! //! The Treasury module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(test)] mod tests; mod benchmarking; + pub mod weights; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use sp_std::prelude::*; -use frame_support::{decl_module, decl_storage, decl_event, ensure, print, decl_error, Parameter}; +use frame_support::{decl_module, decl_storage, decl_event, ensure, print, decl_error}; use frame_support::traits::{ - Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::{KeepAlive, AllowDeath}, + Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::{KeepAlive}, ReservableCurrency, WithdrawReasons }; -use sp_runtime::{Permill, ModuleId, Percent, RuntimeDebug, DispatchResult, traits::{ - Zero, StaticLookup, AccountIdConversion, Saturating, Hash, BadOrigin +use sp_runtime::{Permill, ModuleId, RuntimeDebug, traits::{ + Zero, StaticLookup, AccountIdConversion, Saturating }}; -use frame_support::dispatch::DispatchResultWithPostInfo; use frame_support::weights::{Weight, DispatchClass}; -use frame_support::traits::{Contains, ContainsLengthBound, EnsureOrigin}; +use frame_support::traits::{EnsureOrigin}; use codec::{Encode, Decode}; -use frame_system::{self as system, ensure_signed}; +use frame_system::{ensure_signed}; pub use weights::WeightInfo; -type BalanceOf = - <>::Currency as Currency<::AccountId>>::Balance; -type PositiveImbalanceOf = - <>::Currency as Currency<::AccountId>>::PositiveImbalance; -type NegativeImbalanceOf = - <>::Currency as Currency<::AccountId>>::NegativeImbalance; +pub type BalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; +pub type PositiveImbalanceOf = + <>::Currency as Currency<::AccountId>>::PositiveImbalance; +pub type NegativeImbalanceOf = + <>::Currency as Currency<::AccountId>>::NegativeImbalance; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The treasury's module id, used for deriving its sovereign account ID. 
 	type ModuleId: Get<ModuleId>;
@@ -174,25 +100,8 @@ pub trait Trait<I=DefaultInstance>: frame_system::Trait {
 	/// Origin from which rejections must come.
 	type RejectOrigin: EnsureOrigin<Self::Origin>;
 
-	/// Origin from which tippers must come.
-	///
-	/// `ContainsLengthBound::max_len` must be cost free (i.e. no storage read or heavy operation).
-	type Tippers: Contains<Self::AccountId> + ContainsLengthBound;
-
-	/// The period for which a tip remains open after is has achieved threshold tippers.
-	type TipCountdown: Get<Self::BlockNumber>;
-
-	/// The percent of the final tip which goes to the original reporter of the tip.
-	type TipFindersFee: Get<Percent>;
-
-	/// The amount held on deposit for placing a tip report.
-	type TipReportDepositBase: Get<BalanceOf<Self, I>>;
-
-	/// The amount held on deposit per byte within the tip report reason or bounty description.
-	type DataDepositPerByte: Get<BalanceOf<Self, I>>;
-
 	/// The overarching event type.
-	type Event: From<Event<Self, I>> + Into<<Self as frame_system::Trait>::Event>;
+	type Event: From<Event<Self, I>> + Into<<Self as frame_system::Config>::Event>;
 
 	/// Handler for the unbalanced decrease when slashing for a rejected proposal or bounty.
 	type OnSlash: OnUnbalanced<NegativeImbalanceOf<Self, I>>;
@@ -210,29 +119,36 @@ pub trait Trait<I=DefaultInstance>: frame_system::Trait {
 	/// Percentage of spare funds (if any) that are burnt per spend period.
 	type Burn: Get<Permill>;
 
-	/// The amount held on deposit for placing a bounty proposal.
-	type BountyDepositBase: Get<BalanceOf<Self, I>>;
-
-	/// The delay period for which a bounty beneficiary need to wait before claim the payout.
-	type BountyDepositPayoutDelay: Get<Self::BlockNumber>;
-
-	/// Bounty duration in blocks.
-	type BountyUpdatePeriod: Get<Self::BlockNumber>;
-
-	/// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator.
-	type BountyCuratorDeposit: Get<Permill>;
-
-	/// Minimum value for a bounty.
-	type BountyValueMinimum: Get<BalanceOf<Self, I>>;
-
-	/// Maximum acceptable reason length.
-	type MaximumReasonLength: Get<u32>;
-
 	/// Handler for the unbalanced decrease when treasury funds are burned.
 	type BurnDestination: OnUnbalanced<NegativeImbalanceOf<Self, I>>;
 
 	/// Weight information for extrinsics in this pallet.
 	type WeightInfo: WeightInfo;
+
+	/// Runtime hooks to external pallets that use the treasury to compute spend funds.
+	type SpendFunds: SpendFunds<Self, I>;
+}
+
+/// A trait to allow the Treasury Pallet to spend its funds for other purposes.
+/// There is an expectation that the implementer of this trait will correctly manage
+/// the mutable variables passed to it:
+/// * `budget_remaining`: The amount of available funds that can be spent by the treasury.
+///   As funds are spent, you must correctly deduct from this value.
+/// * `imbalance`: Any imbalances that you create should be subsumed in here to
+///   maximize efficiency of updating the total issuance. (i.e. `deposit_creating`)
+/// * `total_weight`: Track any weight that your `spend_funds` implementation uses by
+///   updating this value.
+/// * `missed_any`: If there were items that you want to spend on, but there were
+///   not enough funds, mark this value as `true`. This will prevent the treasury
+///   from burning the excess funds.
+#[impl_trait_for_tuples::impl_for_tuples(30)]
+pub trait SpendFunds<T: Config<I>, I=DefaultInstance> {
+	fn spend_funds(
+		budget_remaining: &mut BalanceOf<T, I>,
+		imbalance: &mut PositiveImbalanceOf<T, I>,
+		total_weight: &mut Weight,
+		missed_any: &mut bool,
+	);
 }
 
 /// An index of a proposal. Just a `u32`.
@@ -252,122 +168,18 @@ pub struct Proposal<AccountId, Balance> {
 	bond: Balance,
 }
 
-/// An open tipping "motion". Retains all details of a tip including information on the finder
-/// and the members who have voted.
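The new `SpendFunds` hook takes over the role the inlined bounty-funding loop used to play inside `spend_funds`. The following is a rough, hypothetical sketch of how an external pallet is expected to drive the four mutable parameters; `ExampleFunder` and its payout queue are illustrative only (not part of this change), and the treasury's own items (`Config`, `BalanceOf`, `PositiveImbalanceOf`, `SpendFunds`, plus the `Currency`, `Imbalance` and `Weight` imports from this file) are assumed to be in scope.

// A hypothetical pallet-side implementer of the `SpendFunds` hook (default instance).
pub struct ExampleFunder;

impl<T: Config> SpendFunds<T> for ExampleFunder {
	fn spend_funds(
		budget_remaining: &mut BalanceOf<T>,
		imbalance: &mut PositiveImbalanceOf<T>,
		total_weight: &mut Weight,
		missed_any: &mut bool,
	) {
		// Illustrative queue of (beneficiary, amount) payouts; a real pallet
		// would read this from its own storage.
		let queue: Vec<(T::AccountId, BalanceOf<T>)> = Vec::new();

		for (who, amount) in queue {
			if amount <= *budget_remaining {
				// Deduct what is spent from the treasury's remaining budget.
				*budget_remaining -= amount;
				// Subsume the minted imbalance so the treasury updates total
				// issuance once, after every hook has run.
				imbalance.subsume(T::Currency::deposit_creating(&who, amount));
			} else {
				// Not enough funds left: ask the treasury not to burn the
				// surplus this spend period.
				*missed_any = true;
			}
		}

		// Account for the weight this hook consumed (placeholder figure).
		*total_weight += 10_000;
	}
}

A runtime wires the hook through the new `type SpendFunds` associated type. Because the trait carries `#[impl_trait_for_tuples::impl_for_tuples(30)]`, `()` is a valid no-op value, which is exactly what the updated mock in tests.rs below uses, and a tuple of implementers would be invoked in order within `spend_funds`.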
-#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] -pub struct OpenTip< - AccountId: Parameter, - Balance: Parameter, - BlockNumber: Parameter, - Hash: Parameter, -> { - /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be - /// sensible. - reason: Hash, - /// The account to be tipped. - who: AccountId, - /// The account who began this tip. - finder: AccountId, - /// The amount held on deposit for this tip. - deposit: Balance, - /// The block number at which this tip will close if `Some`. If `None`, then no closing is - /// scheduled. - closes: Option, - /// The members who have voted for this tip. Sorted by AccountId. - tips: Vec<(AccountId, Balance)>, - /// Whether this tip should result in the finder taking a fee. - finders_fee: bool, -} - -/// An index of a bounty. Just a `u32`. -pub type BountyIndex = u32; - -/// A bounty proposal. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct Bounty { - /// The account proposing it. - proposer: AccountId, - /// The (total) amount that should be paid if the bounty is rewarded. - value: Balance, - /// The curator fee. Included in value. - fee: Balance, - /// The deposit of curator. - curator_deposit: Balance, - /// The amount held on deposit (reserved) for making this proposal. - bond: Balance, - /// The status of this bounty. - status: BountyStatus, -} - -/// The status of a bounty proposal. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub enum BountyStatus { - /// The bounty is proposed and waiting for approval. - Proposed, - /// The bounty is approved and waiting to become active at next spend period. - Approved, - /// The bounty is funded and waiting for curator assignment. - Funded, - /// A curator has been proposed by the `ApproveOrigin`. Waiting for acceptance from the curator. - CuratorProposed { - /// The assigned curator of this bounty. - curator: AccountId, - }, - /// The bounty is active and waiting to be awarded. - Active { - /// The curator of this bounty. - curator: AccountId, - /// An update from the curator is due by this block, else they are considered inactive. - update_due: BlockNumber, - }, - /// The bounty is awarded and waiting to released after a delay. - PendingPayout { - /// The curator of this bounty. - curator: AccountId, - /// The beneficiary of the bounty. - beneficiary: AccountId, - /// When the bounty can be claimed. - unlock_at: BlockNumber, - }, -} - decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Treasury { + trait Store for Module, I: Instance=DefaultInstance> as Treasury { /// Number of proposals that have been made. ProposalCount get(fn proposal_count): ProposalIndex; /// Proposals that have been made. - Proposals get(fn proposals): + pub Proposals get(fn proposals): map hasher(twox_64_concat) ProposalIndex => Option>>; /// Proposal indices that have been approved but not yet awarded. - Approvals get(fn approvals): Vec; - - /// Tips that are not yet completed. Keyed by the hash of `(reason, who)` from the value. - /// This has the insecure enumerable hash function since the key itself is already - /// guaranteed to be a secure hash. - pub Tips get(fn tips): - map hasher(twox_64_concat) T::Hash - => Option, T::BlockNumber, T::Hash>>; - - /// Simple preimage lookup from the reason's hash to the original data. Again, has an - /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. 
- pub Reasons get(fn reasons): map hasher(identity) T::Hash => Option>; - - /// Number of bounty proposals that have been made. - pub BountyCount get(fn bounty_count): BountyIndex; - - /// Bounties that have been made. - pub Bounties get(fn bounties): - map hasher(twox_64_concat) BountyIndex - => Option, T::BlockNumber>>; - - /// The description of each bounty. - pub BountyDescriptions get(fn bounty_descriptions): map hasher(twox_64_concat) BountyIndex => Option>; - - /// Bounty indices that have been approved but not yet funded. - pub BountyApprovals get(fn bounty_approvals): Vec; + pub Approvals get(fn approvals): Vec; } add_extra_genesis { build(|_config| { @@ -388,8 +200,7 @@ decl_event!( pub enum Event where Balance = BalanceOf, - ::AccountId, - ::Hash, + ::AccountId, { /// New proposal. \[proposal_index\] Proposed(ProposalIndex), @@ -406,66 +217,21 @@ decl_event!( Rollover(Balance), /// Some funds have been deposited. \[deposit\] Deposit(Balance), - /// A new tip suggestion has been opened. \[tip_hash\] - NewTip(Hash), - /// A tip suggestion has reached threshold and is closing. \[tip_hash\] - TipClosing(Hash), - /// A tip suggestion has been closed. \[tip_hash, who, payout\] - TipClosed(Hash, AccountId, Balance), - /// A tip suggestion has been retracted. \[tip_hash\] - TipRetracted(Hash), - /// New bounty proposal. [index] - BountyProposed(BountyIndex), - /// A bounty proposal was rejected; funds were slashed. [index, bond] - BountyRejected(BountyIndex, Balance), - /// A bounty proposal is funded and became active. [index] - BountyBecameActive(BountyIndex), - /// A bounty is awarded to a beneficiary. [index, beneficiary] - BountyAwarded(BountyIndex, AccountId), - /// A bounty is claimed by beneficiary. [index, payout, beneficiary] - BountyClaimed(BountyIndex, Balance, AccountId), - /// A bounty is cancelled. [index] - BountyCanceled(BountyIndex), - /// A bounty expiry is extended. [index] - BountyExtended(BountyIndex), } ); decl_error! { /// Error for the treasury module. - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// Proposer's balance is too low. InsufficientProposersBalance, /// No proposal or bounty at that index. InvalidIndex, - /// The reason given is just too big. - ReasonTooBig, - /// The tip was already found/started. - AlreadyKnown, - /// The tip hash is unknown. - UnknownTip, - /// The account attempting to retract the tip is not the finder of the tip. - NotFinder, - /// The tip cannot be claimed/closed because there are not enough tippers yet. - StillOpen, - /// The tip cannot be claimed/closed because it's still in the countdown period. - Premature, - /// The bounty status is unexpected. - UnexpectedStatus, - /// Require bounty curator. - RequireCurator, - /// Invalid bounty value. - InvalidValue, - /// Invalid bounty fee. - InvalidFee, - /// A bounty payout is pending. - /// To cancel the bounty, you must unassign and slash the curator. - PendingPayout, } } decl_module! { - pub struct Module, I: Instance=DefaultInstance> + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { @@ -482,35 +248,9 @@ decl_module! { /// Percentage of spare funds (if any) that are burnt per spend period. const Burn: Permill = T::Burn::get(); - /// The period for which a tip remains open after is has achieved threshold tippers. - const TipCountdown: T::BlockNumber = T::TipCountdown::get(); - - /// The amount of the final tip which goes to the original reporter of the tip. 
- const TipFindersFee: Percent = T::TipFindersFee::get(); - - /// The amount held on deposit for placing a tip report. - const TipReportDepositBase: BalanceOf = T::TipReportDepositBase::get(); - - /// The amount held on deposit per byte within the tip report reason or bounty description. - const DataDepositPerByte: BalanceOf = T::DataDepositPerByte::get(); - /// The treasury's module id, used for deriving its sovereign account ID. const ModuleId: ModuleId = T::ModuleId::get(); - /// The amount held on deposit for placing a bounty proposal. - const BountyDepositBase: BalanceOf = T::BountyDepositBase::get(); - - /// The delay period for which a bounty beneficiary need to wait before claim the payout. - const BountyDepositPayoutDelay: T::BlockNumber = T::BountyDepositPayoutDelay::get(); - - /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. - const BountyCuratorDeposit: Permill = T::BountyCuratorDeposit::get(); - - const BountyValueMinimum: BalanceOf = T::BountyValueMinimum::get(); - - /// Maximum acceptable reason length. - const MaximumReasonLength: u32 = T::MaximumReasonLength::get(); - type Error = Error; fn deposit_event() = default; @@ -525,7 +265,7 @@ decl_module! { /// - DbWrites: `ProposalCount`, `Proposals`, `origin account` /// # #[weight = T::WeightInfo::propose_spend()] - fn propose_spend( + pub fn propose_spend( origin, #[compact] value: BalanceOf, beneficiary: ::Source @@ -554,7 +294,7 @@ decl_module! { /// - DbWrites: `Proposals`, `rejected proposer account` /// # #[weight = (T::WeightInfo::reject_proposal(), DispatchClass::Operational)] - fn reject_proposal(origin, #[compact] proposal_id: ProposalIndex) { + pub fn reject_proposal(origin, #[compact] proposal_id: ProposalIndex) { T::RejectOrigin::ensure_origin(origin)?; let proposal = >::take(&proposal_id).ok_or(Error::::InvalidIndex)?; @@ -576,571 +316,13 @@ decl_module! { /// - DbWrite: `Approvals` /// # #[weight = (T::WeightInfo::approve_proposal(), DispatchClass::Operational)] - fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { + pub fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { T::ApproveOrigin::ensure_origin(origin)?; ensure!(>::contains_key(proposal_id), Error::::InvalidIndex); Approvals::::append(proposal_id); } - /// Report something `reason` that deserves a tip and claim any eventual the finder's fee. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as - /// `DataDepositPerByte` for each byte in `reason`. - /// - /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be - /// a UTF-8-encoded URL. - /// - `who`: The account which should be credited for the tip. - /// - /// Emits `NewTip` if successful. - /// - /// # - /// - Complexity: `O(R)` where `R` length of `reason`. 
- /// - encoding and hashing of 'reason' - /// - DbReads: `Reasons`, `Tips` - /// - DbWrites: `Reasons`, `Tips` - /// # - #[weight = T::WeightInfo::report_awesome(reason.len() as u32)] - fn report_awesome(origin, reason: Vec, who: T::AccountId) { - let finder = ensure_signed(origin)?; - - ensure!(reason.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); - - let reason_hash = T::Hashing::hash(&reason[..]); - ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); - let hash = T::Hashing::hash_of(&(&reason_hash, &who)); - ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); - - let deposit = T::TipReportDepositBase::get() - + T::DataDepositPerByte::get() * (reason.len() as u32).into(); - T::Currency::reserve(&finder, deposit)?; - - Reasons::::insert(&reason_hash, &reason); - let tip = OpenTip { - reason: reason_hash, - who, - finder, - deposit, - closes: None, - tips: vec![], - finders_fee: true - }; - Tips::::insert(&hash, tip); - Self::deposit_event(RawEvent::NewTip(hash)); - } - - /// Retract a prior tip-report from `report_awesome`, and cancel the process of tipping. - /// - /// If successful, the original deposit will be unreserved. - /// - /// The dispatch origin for this call must be _Signed_ and the tip identified by `hash` - /// must have been reported by the signing account through `report_awesome` (and not - /// through `tip_new`). - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. - /// - /// Emits `TipRetracted` if successful. - /// - /// # - /// - Complexity: `O(1)` - /// - Depends on the length of `T::Hash` which is fixed. - /// - DbReads: `Tips`, `origin account` - /// - DbWrites: `Reasons`, `Tips`, `origin account` - /// # - #[weight = T::WeightInfo::retract_tip()] - fn retract_tip(origin, hash: T::Hash) { - let who = ensure_signed(origin)?; - let tip = Tips::::get(&hash).ok_or(Error::::UnknownTip)?; - ensure!(tip.finder == who, Error::::NotFinder); - - Reasons::::remove(&tip.reason); - Tips::::remove(&hash); - if !tip.deposit.is_zero() { - let _ = T::Currency::unreserve(&who, tip.deposit); - } - Self::deposit_event(RawEvent::TipRetracted(hash)); - } - - /// Give a tip for something new; no finder's fee will be taken. - /// - /// The dispatch origin for this call must be _Signed_ and the signing account must be a - /// member of the `Tippers` set. - /// - /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be - /// a UTF-8-encoded URL. - /// - `who`: The account which should be credited for the tip. - /// - `tip_value`: The amount of tip that the sender would like to give. The median tip - /// value of active tippers will be given to the `who`. - /// - /// Emits `NewTip` if successful. - /// - /// # - /// - Complexity: `O(R + T)` where `R` length of `reason`, `T` is the number of tippers. - /// - `O(T)`: decoding `Tipper` vec of length `T` - /// `T` is charged as upper bound given by `ContainsLengthBound`. - /// The actual cost depends on the implementation of `T::Tippers`. 
- /// - `O(R)`: hashing and encoding of reason of length `R` - /// - DbReads: `Tippers`, `Reasons` - /// - DbWrites: `Reasons`, `Tips` - /// # - #[weight = T::WeightInfo::tip_new(reason.len() as u32, T::Tippers::max_len() as u32)] - fn tip_new(origin, reason: Vec, who: T::AccountId, #[compact] tip_value: BalanceOf) { - let tipper = ensure_signed(origin)?; - ensure!(T::Tippers::contains(&tipper), BadOrigin); - let reason_hash = T::Hashing::hash(&reason[..]); - ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); - let hash = T::Hashing::hash_of(&(&reason_hash, &who)); - - Reasons::::insert(&reason_hash, &reason); - Self::deposit_event(RawEvent::NewTip(hash.clone())); - let tips = vec![(tipper.clone(), tip_value)]; - let tip = OpenTip { - reason: reason_hash, - who, - finder: tipper, - deposit: Zero::zero(), - closes: None, - tips, - finders_fee: false, - }; - Tips::::insert(&hash, tip); - } - - /// Declare a tip value for an already-open tip. - /// - /// The dispatch origin for this call must be _Signed_ and the signing account must be a - /// member of the `Tippers` set. - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the hash of the original tip `reason` and the beneficiary - /// account ID. - /// - `tip_value`: The amount of tip that the sender would like to give. The median tip - /// value of active tippers will be given to the `who`. - /// - /// Emits `TipClosing` if the threshold of tippers has been reached and the countdown period - /// has started. - /// - /// # - /// - Complexity: `O(T)` where `T` is the number of tippers. - /// decoding `Tipper` vec of length `T`, insert tip and check closing, - /// `T` is charged as upper bound given by `ContainsLengthBound`. - /// The actual cost depends on the implementation of `T::Tippers`. - /// - /// Actually weight could be lower as it depends on how many tips are in `OpenTip` but it - /// is weighted as if almost full i.e of length `T-1`. - /// - DbReads: `Tippers`, `Tips` - /// - DbWrites: `Tips` - /// # - #[weight = T::WeightInfo::tip(T::Tippers::max_len() as u32)] - fn tip(origin, hash: T::Hash, #[compact] tip_value: BalanceOf) { - let tipper = ensure_signed(origin)?; - ensure!(T::Tippers::contains(&tipper), BadOrigin); - - let mut tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; - if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) { - Self::deposit_event(RawEvent::TipClosing(hash.clone())); - } - Tips::::insert(&hash, tip); - } - - /// Close and payout a tip. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// The tip identified by `hash` must have finished its countdown period. - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. - /// - /// # - /// - Complexity: `O(T)` where `T` is the number of tippers. - /// decoding `Tipper` vec of length `T`. - /// `T` is charged as upper bound given by `ContainsLengthBound`. - /// The actual cost depends on the implementation of `T::Tippers`. 
- /// - DbReads: `Tips`, `Tippers`, `tip finder` - /// - DbWrites: `Reasons`, `Tips`, `Tippers`, `tip finder` - /// # - #[weight = T::WeightInfo::close_tip(T::Tippers::max_len() as u32)] - fn close_tip(origin, hash: T::Hash) { - ensure_signed(origin)?; - - let tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; - let n = tip.closes.as_ref().ok_or(Error::::StillOpen)?; - ensure!(system::Module::::block_number() >= *n, Error::::Premature); - // closed. - Reasons::::remove(&tip.reason); - Tips::::remove(hash); - Self::payout_tip(hash, tip); - } - - /// Propose a new bounty. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as - /// `DataDepositPerByte` for each byte in `reason`. It will be unreserved upon approval, - /// or slashed when rejected. - /// - /// - `curator`: The curator account whom will manage this bounty. - /// - `fee`: The curator fee. - /// - `value`: The total payment amount of this bounty, curator fee included. - /// - `description`: The description of this bounty. - #[weight = T::WeightInfo::propose_bounty(description.len() as u32)] - fn propose_bounty( - origin, - #[compact] value: BalanceOf, - description: Vec, - ) { - let proposer = ensure_signed(origin)?; - Self::create_bounty(proposer, description, value)?; - } - - /// Approve a bounty proposal. At a later time, the bounty will be funded and become active - /// and the original deposit will be returned. - /// - /// May only be called from `T::ApproveOrigin`. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. - /// # - #[weight = T::WeightInfo::approve_bounty()] - fn approve_bounty(origin, #[compact] bounty_id: ProposalIndex) { - T::ApproveOrigin::ensure_origin(origin)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - ensure!(bounty.status == BountyStatus::Proposed, Error::::UnexpectedStatus); - - bounty.status = BountyStatus::Approved; - - BountyApprovals::::append(bounty_id); - - Ok(()) - })?; - } - - /// Assign a curator to a funded bounty. - /// - /// May only be called from `T::ApproveOrigin`. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. - /// # - #[weight = T::WeightInfo::propose_curator()] - fn propose_curator( - origin, - #[compact] bounty_id: ProposalIndex, - curator: ::Source, - #[compact] fee: BalanceOf, - ) { - T::ApproveOrigin::ensure_origin(origin)?; - - let curator = T::Lookup::lookup(curator)?; - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - match bounty.status { - BountyStatus::Funded | BountyStatus::CuratorProposed { .. } => {}, - _ => return Err(Error::::UnexpectedStatus.into()), - }; - - ensure!(fee < bounty.value, Error::::InvalidFee); - - bounty.status = BountyStatus::CuratorProposed { curator }; - bounty.fee = fee; - - Ok(()) - })?; - } - - /// Unassign curator from a bounty. - /// - /// This function can only be called by the `RejectOrigin` a signed origin. - /// - /// If this function is called by the `RejectOrigin`, we assume that the curator is malicious - /// or inactive. As a result, we will slash the curator when possible. - /// - /// If the origin is the curator, we take this as a sign they are unable to do their job and - /// they willingly give up. 
We could slash them, but for now we allow them to recover their - /// deposit and exit without issue. (We may want to change this if it is abused.) - /// - /// Finally, the origin can be anyone if and only if the curator is "inactive". This allows - /// anyone in the community to call out that a curator is not doing their due diligence, and - /// we should pick a new curator. In this case the curator should also be slashed. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. - /// # - #[weight = T::WeightInfo::unassign_curator()] - fn unassign_curator( - origin, - #[compact] bounty_id: ProposalIndex, - ) { - let maybe_sender = ensure_signed(origin.clone()) - .map(Some) - .or_else(|_| T::RejectOrigin::ensure_origin(origin).map(|_| None))?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - - let slash_curator = |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { - let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; - T::OnSlash::on_unbalanced(imbalance); - *curator_deposit = Zero::zero(); - }; - - match bounty.status { - BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { - // No curator to unassign at this point. - return Err(Error::::UnexpectedStatus.into()) - } - BountyStatus::CuratorProposed { ref curator } => { - // A curator has been proposed, but not accepted yet. - // Either `RejectOrigin` or the proposed curator can unassign the curator. - ensure!(maybe_sender.map_or(true, |sender| sender == *curator), BadOrigin); - }, - BountyStatus::Active { ref curator, ref update_due } => { - // The bounty is active. - match maybe_sender { - // If the `RejectOrigin` is calling this function, slash the curator. - None => { - slash_curator(curator, &mut bounty.curator_deposit); - // Continue to change bounty status below... - }, - Some(sender) => { - // If the sender is not the curator, and the curator is inactive, - // slash the curator. - if sender != *curator { - let block_number = system::Module::::block_number(); - if *update_due < block_number { - slash_curator(curator, &mut bounty.curator_deposit); - // Continue to change bounty status below... - } else { - // Curator has more time to give an update. - return Err(Error::::Premature.into()) - } - } else { - // Else this is the curator, willingly giving up their role. - // Give back their deposit. - let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); - // Continue to change bounty status below... - } - }, - } - }, - BountyStatus::PendingPayout { ref curator, .. } => { - // The bounty is pending payout, so only council can unassign a curator. - // By doing so, they are claiming the curator is acting maliciously, so - // we slash the curator. - ensure!(maybe_sender.is_none(), BadOrigin); - slash_curator(curator, &mut bounty.curator_deposit); - // Continue to change bounty status below... - } - }; - - bounty.status = BountyStatus::Funded; - Ok(()) - })?; - } - - /// Accept the curator role for a bounty. - /// A deposit will be reserved from curator and refund upon successful payout. - /// - /// May only be called from the curator. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. 
- /// # - #[weight = T::WeightInfo::accept_curator()] - fn accept_curator(origin, #[compact] bounty_id: ProposalIndex) { - let signer = ensure_signed(origin)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - - match bounty.status { - BountyStatus::CuratorProposed { ref curator } => { - ensure!(signer == *curator, Error::::RequireCurator); - - let deposit = T::BountyCuratorDeposit::get() * bounty.fee; - T::Currency::reserve(curator, deposit)?; - bounty.curator_deposit = deposit; - - let update_due = system::Module::::block_number() + T::BountyUpdatePeriod::get(); - bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; - - Ok(()) - }, - _ => Err(Error::::UnexpectedStatus.into()), - } - })?; - } - - /// Award bounty to a beneficiary account. The beneficiary will be able to claim the funds after a delay. - /// - /// The dispatch origin for this call must be the curator of this bounty. - /// - /// - `bounty_id`: Bounty ID to award. - /// - `beneficiary`: The beneficiary account whom will receive the payout. - #[weight = T::WeightInfo::award_bounty()] - fn award_bounty(origin, #[compact] bounty_id: ProposalIndex, beneficiary: ::Source) { - let signer = ensure_signed(origin)?; - let beneficiary = T::Lookup::lookup(beneficiary)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - match &bounty.status { - BountyStatus::Active { - curator, - .. - } => { - ensure!(signer == *curator, Error::::RequireCurator); - }, - _ => return Err(Error::::UnexpectedStatus.into()), - } - bounty.status = BountyStatus::PendingPayout { - curator: signer, - beneficiary: beneficiary.clone(), - unlock_at: system::Module::::block_number() + T::BountyDepositPayoutDelay::get(), - }; - - Ok(()) - })?; - - Self::deposit_event(Event::::BountyAwarded(bounty_id, beneficiary)); - } - - /// Claim the payout from an awarded bounty after payout delay. - /// - /// The dispatch origin for this call must be the beneficiary of this bounty. - /// - /// - `bounty_id`: Bounty ID to claim. - #[weight = T::WeightInfo::claim_bounty()] - fn claim_bounty(origin, #[compact] bounty_id: BountyIndex) { - let _ = ensure_signed(origin)?; // anyone can trigger claim - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let bounty = maybe_bounty.take().ok_or(Error::::InvalidIndex)?; - if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } = bounty.status { - ensure!(system::Module::::block_number() >= unlock_at, Error::::Premature); - let bounty_account = Self::bounty_account_id(bounty_id); - let balance = T::Currency::free_balance(&bounty_account); - let fee = bounty.fee.min(balance); // just to be safe - let payout = balance.saturating_sub(fee); - let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); - let _ = T::Currency::transfer(&bounty_account, &curator, fee, AllowDeath); // should not fail - let _ = T::Currency::transfer(&bounty_account, &beneficiary, payout, AllowDeath); // should not fail - *maybe_bounty = None; - - BountyDescriptions::::remove(bounty_id); - - Self::deposit_event(Event::::BountyClaimed(bounty_id, payout, beneficiary)); - Ok(()) - } else { - Err(Error::::UnexpectedStatus.into()) - } - })?; - } - - /// Cancel a proposed or active bounty. All the funds will be sent to treasury and - /// the curator deposit will be unreserved if possible. 
- /// - /// Only `T::RejectOrigin` is able to cancel a bounty. - /// - /// - `bounty_id`: Bounty ID to cancel. - #[weight = T::WeightInfo::close_bounty_proposed().max(T::WeightInfo::close_bounty_active())] - fn close_bounty(origin, #[compact] bounty_id: BountyIndex) -> DispatchResultWithPostInfo { - T::RejectOrigin::ensure_origin(origin)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResultWithPostInfo { - let bounty = maybe_bounty.as_ref().ok_or(Error::::InvalidIndex)?; - - match &bounty.status { - BountyStatus::Proposed => { - // The reject origin would like to cancel a proposed bounty. - BountyDescriptions::::remove(bounty_id); - let value = bounty.bond; - let imbalance = T::Currency::slash_reserved(&bounty.proposer, value).0; - T::OnSlash::on_unbalanced(imbalance); - *maybe_bounty = None; - - Self::deposit_event(Event::::BountyRejected(bounty_id, value)); - // Return early, nothing else to do. - return Ok(Some(T::WeightInfo::close_bounty_proposed()).into()) - }, - BountyStatus::Approved => { - // For weight reasons, we don't allow a council to cancel in this phase. - // We ask for them to wait until it is funded before they can cancel. - return Err(Error::::UnexpectedStatus.into()) - }, - BountyStatus::Funded | - BountyStatus::CuratorProposed { .. } => { - // Nothing extra to do besides the removal of the bounty below. - }, - BountyStatus::Active { curator, .. } => { - // Cancelled by council, refund deposit of the working curator. - let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); - // Then execute removal of the bounty below. - }, - BountyStatus::PendingPayout { .. } => { - // Bounty is already pending payout. If council wants to cancel - // this bounty, it should mean the curator was acting maliciously. - // So the council should first unassign the curator, slashing their - // deposit. - return Err(Error::::PendingPayout.into()) - } - } - - let bounty_account = Self::bounty_account_id(bounty_id); - - BountyDescriptions::::remove(bounty_id); - - let balance = T::Currency::free_balance(&bounty_account); - let _ = T::Currency::transfer(&bounty_account, &Self::account_id(), balance, AllowDeath); // should not fail - *maybe_bounty = None; - - Self::deposit_event(Event::::BountyCanceled(bounty_id)); - Ok(Some(T::WeightInfo::close_bounty_active()).into()) - }) - } - - /// Extend the expiry time of an active bounty. - /// - /// The dispatch origin for this call must be the curator of this bounty. - /// - /// - `bounty_id`: Bounty ID to extend. - /// - `remark`: additional information. - #[weight = T::WeightInfo::extend_bounty_expiry()] - fn extend_bounty_expiry(origin, #[compact] bounty_id: BountyIndex, _remark: Vec) { - let signer = ensure_signed(origin)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - - match bounty.status { - BountyStatus::Active { ref curator, ref mut update_due } => { - ensure!(*curator == signer, Error::::RequireCurator); - *update_due = (system::Module::::block_number() + T::BountyUpdatePeriod::get()).max(*update_due); - }, - _ => return Err(Error::::UnexpectedStatus.into()), - } - - Ok(()) - })?; - - Self::deposit_event(Event::::BountyExtended(bounty_id)); - } - /// # /// - Complexity: `O(A)` where `A` is the number of approvals /// - Db reads and writes: `Approvals`, `pot account data` @@ -1159,7 +341,7 @@ decl_module! 
{ } } -impl, I: Instance> Module { +impl, I: Instance> Module { // Add public immutables and private mutables. /// The account ID of the treasury pot. @@ -1170,93 +352,13 @@ impl, I: Instance> Module { T::ModuleId::get().into_account() } - /// The account ID of a bounty account - pub fn bounty_account_id(id: BountyIndex) -> T::AccountId { - // only use two byte prefix to support 16 byte account id (used by test) - // "modl" ++ "py/trsry" ++ "bt" is 14 bytes, and two bytes remaining for bounty index - T::ModuleId::get().into_sub_account(("bt", id)) - } - /// The needed bond for a proposal whose spend is `value`. fn calculate_bond(value: BalanceOf) -> BalanceOf { T::ProposalBondMinimum::get().max(T::ProposalBond::get() * value) } - /// Given a mutable reference to an `OpenTip`, insert the tip into it and check whether it - /// closes, if so, then deposit the relevant event and set closing accordingly. - /// - /// `O(T)` and one storage access. - fn insert_tip_and_check_closing( - tip: &mut OpenTip, T::BlockNumber, T::Hash>, - tipper: T::AccountId, - tip_value: BalanceOf, - ) -> bool { - match tip.tips.binary_search_by_key(&&tipper, |x| &x.0) { - Ok(pos) => tip.tips[pos] = (tipper, tip_value), - Err(pos) => tip.tips.insert(pos, (tipper, tip_value)), - } - Self::retain_active_tips(&mut tip.tips); - let threshold = (T::Tippers::count() + 1) / 2; - if tip.tips.len() >= threshold && tip.closes.is_none() { - tip.closes = Some(system::Module::::block_number() + T::TipCountdown::get()); - true - } else { - false - } - } - - /// Remove any non-members of `Tippers` from a `tips` vector. `O(T)`. - fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf)>) { - let members = T::Tippers::sorted_members(); - let mut members_iter = members.iter(); - let mut member = members_iter.next(); - tips.retain(|(ref a, _)| loop { - match member { - None => break false, - Some(m) if m > a => break false, - Some(m) => { - member = members_iter.next(); - if m < a { - continue - } else { - break true; - } - } - } - }); - } - - /// Execute the payout of a tip. - /// - /// Up to three balance operations. - /// Plus `O(T)` (`T` is Tippers length). - fn payout_tip(hash: T::Hash, tip: OpenTip, T::BlockNumber, T::Hash>) { - let mut tips = tip.tips; - Self::retain_active_tips(&mut tips); - tips.sort_by_key(|i| i.1); - let treasury = Self::account_id(); - let max_payout = Self::pot(); - let mut payout = tips[tips.len() / 2].1.min(max_payout); - if !tip.deposit.is_zero() { - let _ = T::Currency::unreserve(&tip.finder, tip.deposit); - } - if tip.finders_fee { - if tip.finder != tip.who { - // pay out the finder's fee. - let finders_fee = T::TipFindersFee::get() * payout; - payout -= finders_fee; - // this should go through given we checked it's at most the free balance, but still - // we only make a best-effort. - let _ = T::Currency::transfer(&treasury, &tip.finder, finders_fee, KeepAlive); - } - } - // same as above: best-effort only. - let _ = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); - Self::deposit_event(RawEvent::TipClosed(hash, tip.who, payout)); - } - /// Spend some money! returns number of approvals before spend. 
- fn spend_funds() -> Weight { + pub fn spend_funds() -> Weight { let mut total_weight: Weight = Zero::zero(); let mut budget_remaining = Self::pot(); @@ -1295,38 +397,8 @@ impl, I: Instance> Module { total_weight += T::WeightInfo::on_initialize_proposals(proposals_len); - let bounties_len = BountyApprovals::::mutate(|v| { - let bounties_approval_len = v.len() as u32; - v.retain(|&index| { - Bounties::::mutate(index, |bounty| { - // Should always be true, but shouldn't panic if false or we're screwed. - if let Some(bounty) = bounty { - if bounty.value <= budget_remaining { - budget_remaining -= bounty.value; - - bounty.status = BountyStatus::Funded; - - // return their deposit. - let _ = T::Currency::unreserve(&bounty.proposer, bounty.bond); - - // fund the bounty account - imbalance.subsume(T::Currency::deposit_creating(&Self::bounty_account_id(index), bounty.value)); - - Self::deposit_event(RawEvent::BountyBecameActive(index)); - false - } else { - missed_any = true; - true - } - } else { - false - } - }) - }); - bounties_approval_len - }); - - total_weight += T::WeightInfo::on_initialize_bounties(bounties_len); + // Call Runtime hooks to external pallet using treasury to compute spend funds. + T::SpendFunds::spend_funds( &mut budget_remaining, &mut imbalance, &mut total_weight, &mut missed_any); if !missed_any { // burn some proportion of the remaining budget if we run a surplus. @@ -1361,98 +433,15 @@ impl, I: Instance> Module { /// Return the amount of money in the pot. // The existential deposit is not part of the pot so treasury account never gets deleted. - fn pot() -> BalanceOf { + pub fn pot() -> BalanceOf { T::Currency::free_balance(&Self::account_id()) // Must never be less than 0 but better be safe. .saturating_sub(T::Currency::minimum_balance()) } - fn create_bounty( - proposer: T::AccountId, - description: Vec, - value: BalanceOf, - ) -> DispatchResult { - ensure!(description.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); - ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); - - let index = Self::bounty_count(); - - // reserve deposit for new bounty - let bond = T::BountyDepositBase::get() - + T::DataDepositPerByte::get() * (description.len() as u32).into(); - T::Currency::reserve(&proposer, bond) - .map_err(|_| Error::::InsufficientProposersBalance)?; - - BountyCount::::put(index + 1); - - let bounty = Bounty { - proposer, - value, - fee: 0u32.into(), - curator_deposit: 0u32.into(), - bond, - status: BountyStatus::Proposed, - }; - - Bounties::::insert(index, &bounty); - BountyDescriptions::::insert(index, description); - - Self::deposit_event(RawEvent::BountyProposed(index)); - - Ok(()) - } - - pub fn migrate_retract_tip_for_tip_new() { - /// An open tipping "motion". Retains all details of a tip including information on the finder - /// and the members who have voted. - #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] - pub struct OldOpenTip< - AccountId: Parameter, - Balance: Parameter, - BlockNumber: Parameter, - Hash: Parameter, - > { - /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be - /// sensible. - reason: Hash, - /// The account to be tipped. - who: AccountId, - /// The account who began this tip and the amount held on deposit. - finder: Option<(AccountId, Balance)>, - /// The block number at which this tip will close if `Some`. If `None`, then no closing is - /// scheduled. - closes: Option, - /// The members who have voted for this tip. 
Sorted by AccountId. - tips: Vec<(AccountId, Balance)>, - } - - use frame_support::{Twox64Concat, migration::StorageKeyIterator}; - - for (hash, old_tip) in StorageKeyIterator::< - T::Hash, - OldOpenTip, T::BlockNumber, T::Hash>, - Twox64Concat, - >::new(I::PREFIX.as_bytes(), b"Tips").drain() - { - let (finder, deposit, finders_fee) = match old_tip.finder { - Some((finder, deposit)) => (finder, deposit, true), - None => (T::AccountId::default(), Zero::zero(), false), - }; - let new_tip = OpenTip { - reason: old_tip.reason, - who: old_tip.who, - finder, - deposit, - closes: old_tip.closes, - tips: old_tip.tips, - finders_fee - }; - Tips::::insert(hash, new_tip) - } - } } -impl, I: Instance> OnUnbalanced> for Module { +impl, I: Instance> OnUnbalanced> for Module { fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { let numeric_amount = amount.peek(); diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 88c4f23b91ae2..3c70099843ea8 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,52 +19,50 @@ #![cfg(test)] +use crate as treasury; use super::*; use std::cell::RefCell; use frame_support::{ - assert_noop, assert_ok, impl_outer_origin, impl_outer_event, parameter_types, weights::Weight, - traits::{Contains, OnInitialize} + assert_noop, assert_ok, parameter_types, + traits::OnInitialize, }; + use sp_core::H256; use sp_runtime::{ - Perbill, ModuleId, + ModuleId, testing::Header, - traits::{BlakeTwo256, IdentityLookup, BadOrigin}, + traits::{BlakeTwo256, IdentityLookup}, }; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} - - -mod treasury { - // Re-export needed for `impl_outer_event!`. - pub use super::super::*; -} - -impl_outer_event! { - pub enum Event for Test { - system, - pallet_balances, - treasury, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Treasury: treasury::{Module, Call, Storage, Config, Event}, } -} - +); -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account @@ -72,24 +70,18 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type Event = Event; @@ -101,73 +93,31 @@ impl pallet_balances::Trait for Test { thread_local! { static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); } -pub struct TenToFourteen; -impl Contains for TenToFourteen { - fn sorted_members() -> Vec { - TEN_TO_FOURTEEN.with(|v| { - v.borrow().clone() - }) - } - #[cfg(feature = "runtime-benchmarks")] - fn add(new: &u128) { - TEN_TO_FOURTEEN.with(|v| { - let mut members = v.borrow_mut(); - members.push(*new); - members.sort(); - }) - } -} -impl ContainsLengthBound for TenToFourteen { - fn max_len() -> usize { - TEN_TO_FOURTEEN.with(|v| v.borrow().len()) - } - fn min_len() -> usize { 0 } -} parameter_types! 
{ pub const ProposalBond: Permill = Permill::from_percent(5); pub const ProposalBondMinimum: u64 = 1; pub const SpendPeriod: u64 = 2; pub const Burn: Permill = Permill::from_percent(50); - pub const TipCountdown: u64 = 1; - pub const TipFindersFee: Percent = Percent::from_percent(20); - pub const TipReportDepositBase: u64 = 1; - pub const DataDepositPerByte: u64 = 1; - pub const BountyDepositBase: u64 = 80; - pub const BountyDepositPayoutDelay: u64 = 3; pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); pub const BountyUpdatePeriod: u32 = 20; - pub const MaximumReasonLength: u32 = 16384; pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); pub const BountyValueMinimum: u64 = 1; } -impl Trait for Test { +impl Config for Test { type ModuleId = TreasuryModuleId; type Currency = pallet_balances::Module; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type Tippers = TenToFourteen; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type DataDepositPerByte = DataDepositPerByte; type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; - type BountyDepositBase = BountyDepositBase; - type BountyDepositPayoutDelay = BountyDepositPayoutDelay; - type BountyUpdatePeriod = BountyUpdatePeriod; - type BountyCuratorDeposit = BountyCuratorDeposit; - type BountyValueMinimum = BountyValueMinimum; - type MaximumReasonLength = MaximumReasonLength; type BurnDestination = (); // Just gets burned. type WeightInfo = (); + type SpendFunds = (); } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Treasury = Module; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -175,19 +125,10 @@ pub fn new_test_ext() -> sp_io::TestExternalities { // Total issuance will be 200 with treasury account initialized at ED. balances: vec![(0, 100), (1, 98), (2, 1)], }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + treasury::GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); t.into() } -fn last_event() -> RawEvent { - System::events().into_iter().map(|r| r.event) - .filter_map(|e| { - if let Event::treasury(inner) = e { Some(inner) } else { None } - }) - .last() - .unwrap() -} - #[test] fn genesis_config_works() { new_test_ext().execute_with(|| { @@ -196,163 +137,6 @@ fn genesis_config_works() { }); } -fn tip_hash() -> H256 { - BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u128)) -} - -#[test] -fn tip_new_cannot_be_used_twice() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - assert_noop!( - Treasury::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), - Error::::AlreadyKnown - ); - }); -} - -#[test] -fn report_awesome_and_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); - assert_eq!(Balances::reserved_balance(0), 12); - assert_eq!(Balances::free_balance(0), 88); - - // other reports don't count. 
- assert_noop!( - Treasury::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), - Error::::AlreadyKnown - ); - - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::tip(Origin::signed(9), h.clone(), 10), BadOrigin); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 102); - assert_eq!(Balances::free_balance(3), 8); - }); -} - -#[test] -fn report_awesome_from_beneficiary_and_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); - assert_eq!(Balances::reserved_balance(0), 12); - assert_eq!(Balances::free_balance(0), 88); - let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u128)); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 110); - }); -} - -#[test] -fn close_tip_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - - let h = tip_hash(); - - assert_eq!(last_event(), RawEvent::NewTip(h)); - - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); - - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - - assert_eq!(last_event(), RawEvent::TipClosing(h)); - - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::Premature); - - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::none(), h.into()), BadOrigin); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - - assert_eq!(last_event(), RawEvent::TipClosed(h, 3, 10)); - - assert_noop!(Treasury::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); - }); -} - -#[test] -fn retract_tip_works() { - new_test_ext().execute_with(|| { - // with report awesome - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); - assert_ok!(Treasury::retract_tip(Origin::signed(0), h.clone())); - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); - - // with tip new - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), 
h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); - assert_ok!(Treasury::retract_tip(Origin::signed(10), h.clone())); - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); - }); -} - -#[test] -fn tip_median_calculation_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000000)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - }); -} - -#[test] -fn tip_changing_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10000)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10000)); - assert_ok!(Treasury::tip(Origin::signed(13), h.clone(), 0)); - assert_ok!(Treasury::tip(Origin::signed(14), h.clone(), 0)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 100)); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - }); -} - #[test] fn minting_works() { new_test_ext().execute_with(|| { @@ -559,596 +343,6 @@ fn inexistent_account_works() { }); } -#[test] -fn propose_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec())); - - assert_eq!(last_event(), RawEvent::BountyProposed(0)); - - let deposit: u64 = 85 + 5; - assert_eq!(Balances::reserved_balance(0), deposit); - assert_eq!(Balances::free_balance(0), 100 - deposit); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 10, - bond: deposit, - status: BountyStatus::Proposed, - }); - - assert_eq!(Treasury::bounty_descriptions(0).unwrap(), b"1234567890".to_vec()); - - assert_eq!(Treasury::bounty_count(), 1); - }); -} - -#[test] -fn propose_bounty_validation_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_noop!( - Treasury::propose_bounty(Origin::signed(1), 0, [0; 17_000].to_vec()), - Error::::ReasonTooBig - ); - - assert_noop!( - Treasury::propose_bounty(Origin::signed(1), 10, b"12345678901234567890".to_vec()), - Error::::InsufficientProposersBalance - ); - - assert_noop!( - Treasury::propose_bounty(Origin::signed(1), 0, b"12345678901234567890".to_vec()), - Error::::InvalidValue - ); - }); -} - -#[test] -fn close_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_noop!(Treasury::close_bounty(Origin::root(), 0), 
Error::::InvalidIndex); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 10, b"12345".to_vec())); - - assert_ok!(Treasury::close_bounty(Origin::root(), 0)); - - let deposit: u64 = 80 + 5; - - assert_eq!(last_event(), RawEvent::BountyRejected(0, deposit)); - - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 100 - deposit); - - assert_eq!(Treasury::bounties(0), None); - assert!(!Bounties::::contains_key(0)); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn approve_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_noop!(Treasury::approve_bounty(Origin::root(), 0), Error::::InvalidIndex); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - let deposit: u64 = 80 + 5; - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - value: 50, - curator_deposit: 0, - bond: deposit, - status: BountyStatus::Approved, - }); - assert_eq!(Treasury::bounty_approvals(), vec![0]); - - assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::::UnexpectedStatus); - - // deposit not returned yet - assert_eq!(Balances::reserved_balance(0), deposit); - assert_eq!(Balances::free_balance(0), 100 - deposit); - - >::on_initialize(2); - - // return deposit - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 100); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 50, - bond: deposit, - status: BountyStatus::Funded, - }); - assert_eq!(Treasury::pot(), 100 - 50 - 25); // burn 25 - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 50); - }); -} - -#[test] -fn assign_curator_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_noop!(Treasury::propose_curator(Origin::root(), 0, 4, 4), Error::::InvalidIndex); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_noop!(Treasury::propose_curator(Origin::root(), 0, 4, 50), Error::::InvalidFee); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::CuratorProposed { - curator: 4, - }, - }); - - assert_noop!(Treasury::accept_curator(Origin::signed(1), 0), Error::::RequireCurator); - assert_noop!(Treasury::accept_curator(Origin::signed(4), 0), pallet_balances::Error::::InsufficientBalance); - - Balances::make_free_balance_be(&4, 10); - - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 2, - value: 50, - bond: 85, - status: BountyStatus::Active { - curator: 4, - update_due: 22, - }, - }); - - assert_eq!(Balances::free_balance(&4), 8); - assert_eq!(Balances::reserved_balance(&4), 2); - }); -} - -#[test] -fn unassign_curator_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - 
assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - - assert_noop!(Treasury::unassign_curator(Origin::signed(1), 0), BadOrigin); - - assert_ok!(Treasury::unassign_curator(Origin::signed(4), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - - Balances::make_free_balance_be(&4, 10); - - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_ok!(Treasury::unassign_curator(Origin::root(), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_eq!(Balances::free_balance(&4), 8); - assert_eq!(Balances::reserved_balance(&4), 0); // slashed 2 - }); -} - -#[test] -fn award_and_claim_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - Balances::make_free_balance_be(&4, 10); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_eq!(Balances::free_balance(4), 8); // inital 10 - 2 deposit - - assert_noop!(Treasury::award_bounty(Origin::signed(1), 0, 3), Error::::RequireCurator); - - assert_ok!(Treasury::award_bounty(Origin::signed(4), 0, 3)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 2, - value: 50, - bond: 85, - status: BountyStatus::PendingPayout { - curator: 4, - beneficiary: 3, - unlock_at: 5 - }, - }); - - assert_noop!(Treasury::claim_bounty(Origin::signed(1), 0), Error::::Premature); - - System::set_block_number(5); - >::on_initialize(5); - - assert_ok!(Balances::transfer(Origin::signed(0), Treasury::bounty_account_id(0), 10)); - - assert_ok!(Treasury::claim_bounty(Origin::signed(1), 0)); - - assert_eq!(last_event(), RawEvent::BountyClaimed(0, 56, 3)); - - assert_eq!(Balances::free_balance(4), 14); // initial 10 + fee 4 - assert_eq!(Balances::free_balance(3), 56); - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); - - assert_eq!(Treasury::bounties(0), None); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn claim_handles_high_fee() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - Balances::make_free_balance_be(&4, 30); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 49)); - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_ok!(Treasury::award_bounty(Origin::signed(4), 0, 3)); - - System::set_block_number(5); - >::on_initialize(5); - - // make fee > balance - let _ = Balances::slash(&Treasury::bounty_account_id(0), 10); - - assert_ok!(Treasury::claim_bounty(Origin::signed(1), 0)); - - assert_eq!(last_event(), RawEvent::BountyClaimed(0, 0, 3)); - 
- assert_eq!(Balances::free_balance(4), 70); // 30 + 50 - 10 - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); - - assert_eq!(Treasury::bounties(0), None); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn cancel_and_refund() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Balances::transfer(Origin::signed(0), Treasury::bounty_account_id(0), 10)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 60); - - assert_noop!(Treasury::close_bounty(Origin::signed(0), 0), BadOrigin); - - assert_ok!(Treasury::close_bounty(Origin::root(), 0)); - - assert_eq!(Treasury::pot(), 85); // - 25 + 10 - }); -} - -#[test] -fn award_and_cancel() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 0, 10)); - assert_ok!(Treasury::accept_curator(Origin::signed(0), 0)); - - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 5); - - assert_ok!(Treasury::award_bounty(Origin::signed(0), 0, 3)); - - // Cannot close bounty directly when payout is happening... - assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::::PendingPayout); - - // Instead unassign the curator to slash them and then close. - assert_ok!(Treasury::unassign_curator(Origin::root(), 0)); - assert_ok!(Treasury::close_bounty(Origin::root(), 0)); - - assert_eq!(last_event(), RawEvent::BountyCanceled(0)); - - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); - // Slashed. 
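// Accepting curatorship above reserved half of the 10-unit fee (5) as the curator deposit; force-unassigning the curator slashes that reserve, so account 0 ends the test at 95 free / 0 reserved instead of recovering the full 100.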
- assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 0); - - assert_eq!(Treasury::bounties(0), None); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn expire_and_unassign() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 1, 10)); - assert_ok!(Treasury::accept_curator(Origin::signed(1), 0)); - - assert_eq!(Balances::free_balance(1), 93); - assert_eq!(Balances::reserved_balance(1), 5); - - System::set_block_number(22); - >::on_initialize(22); - - assert_noop!(Treasury::unassign_curator(Origin::signed(0), 0), Error::::Premature); - - System::set_block_number(23); - >::on_initialize(23); - - assert_ok!(Treasury::unassign_curator(Origin::signed(0), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_eq!(Balances::free_balance(1), 93); - assert_eq!(Balances::reserved_balance(1), 0); // slashed - - }); -} - -#[test] -fn extend_expiry() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - Balances::make_free_balance_be(&4, 10); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - assert_noop!(Treasury::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()), Error::::UnexpectedStatus); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 10)); - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_eq!(Balances::free_balance(4), 5); - assert_eq!(Balances::reserved_balance(4), 5); - - System::set_block_number(10); - >::on_initialize(10); - - assert_noop!(Treasury::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()), Error::::RequireCurator); - assert_ok!(Treasury::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 5, - value: 50, - bond: 85, - status: BountyStatus::Active { curator: 4, update_due: 30 }, - }); - - assert_ok!(Treasury::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 5, - value: 50, - bond: 85, - status: BountyStatus::Active { curator: 4, update_due: 30 }, // still the same - }); - - System::set_block_number(25); - >::on_initialize(25); - - assert_noop!(Treasury::unassign_curator(Origin::signed(0), 0), Error::::Premature); - assert_ok!(Treasury::unassign_curator(Origin::signed(4), 0)); - - assert_eq!(Balances::free_balance(4), 10); // not slashed - assert_eq!(Balances::reserved_balance(4), 0); - }); -} - -#[test] -fn test_last_reward_migration() { - use sp_storage::Storage; - - let mut s = Storage::default(); - - #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] - pub struct OldOpenTip< - AccountId: Parameter, - Balance: Parameter, - BlockNumber: Parameter, - Hash: Parameter, - > { - /// The hash of the reason for the tip. 
The reason should be a human-readable UTF-8 encoded string. A URL would be - /// sensible. - reason: Hash, - /// The account to be tipped. - who: AccountId, - /// The account who began this tip and the amount held on deposit. - finder: Option<(AccountId, Balance)>, - /// The block number at which this tip will close if `Some`. If `None`, then no closing is - /// scheduled. - closes: Option, - /// The members who have voted for this tip. Sorted by AccountId. - tips: Vec<(AccountId, Balance)>, - } - - let reason1 = BlakeTwo256::hash(b"reason1"); - let hash1 = BlakeTwo256::hash_of(&(reason1, 10u64)); - - let old_tip_finder = OldOpenTip:: { - reason: reason1, - who: 10, - finder: Some((20, 30)), - closes: Some(13), - tips: vec![(40, 50), (60, 70)] - }; - - let reason2 = BlakeTwo256::hash(b"reason2"); - let hash2 = BlakeTwo256::hash_of(&(reason2, 20u64)); - - let old_tip_no_finder = OldOpenTip:: { - reason: reason2, - who: 20, - finder: None, - closes: Some(13), - tips: vec![(40, 50), (60, 70)] - }; - - let data = vec![ - ( - Tips::::hashed_key_for(hash1), - old_tip_finder.encode().to_vec() - ), - ( - Tips::::hashed_key_for(hash2), - old_tip_no_finder.encode().to_vec() - ), - ]; - - s.top = data.into_iter().collect(); - sp_io::TestExternalities::new(s).execute_with(|| { - Treasury::migrate_retract_tip_for_tip_new(); - - // Test w/ finder - assert_eq!( - Tips::::get(hash1), - Some(OpenTip { - reason: reason1, - who: 10, - finder: 20, - deposit: 30, - closes: Some(13), - tips: vec![(40, 50), (60, 70)], - finders_fee: true, - }) - ); - - // Test w/o finder - assert_eq!( - Tips::::get(hash2), - Some(OpenTip { - reason: reason2, - who: 20, - finder: Default::default(), - deposit: 0, - closes: Some(13), - tips: vec![(40, 50), (60, 70)], - finders_fee: false, - }) - ); - }); -} - #[test] fn genesis_funding_works() { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -1157,7 +351,7 @@ fn genesis_funding_works() { // Total issuance will be 200 with treasury account initialized with 100. balances: vec![(0, 100), (Treasury::account_id(), initial_funding)], }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + treasury::GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); let mut t: sp_io::TestExternalities = t.into(); t.execute_with(|| { diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index 646b9869f47ef..ea939396c5f1a 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,13 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_treasury +//! Autogenerated weights for pallet_treasury +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-16, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: -// target/release/substrate +// ./target/release/substrate // benchmark // --chain=dev // --steps=50 @@ -46,293 +47,62 @@ pub trait WeightInfo { fn propose_spend() -> Weight; fn reject_proposal() -> Weight; fn approve_proposal() -> Weight; - fn report_awesome(r: u32, ) -> Weight; - fn retract_tip() -> Weight; - fn tip_new(r: u32, t: u32, ) -> Weight; - fn tip(t: u32, ) -> Weight; - fn close_tip(t: u32, ) -> Weight; - fn propose_bounty(d: u32, ) -> Weight; - fn approve_bounty() -> Weight; - fn propose_curator() -> Weight; - fn unassign_curator() -> Weight; - fn accept_curator() -> Weight; - fn award_bounty() -> Weight; - fn claim_bounty() -> Weight; - fn close_bounty_proposed() -> Weight; - fn close_bounty_active() -> Weight; - fn extend_bounty_expiry() -> Weight; fn on_initialize_proposals(p: u32, ) -> Weight; - fn on_initialize_bounties(b: u32, ) -> Weight; - } /// Weights for pallet_treasury using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn propose_spend() -> Weight { - (56_844_000 as Weight) + (59_986_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn reject_proposal() -> Weight { - (46_098_000 as Weight) + (48_300_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn approve_proposal() -> Weight { - (13_622_000 as Weight) + (14_054_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn report_awesome(r: u32, ) -> Weight { - (71_823_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn retract_tip() -> Weight { - (60_150_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn tip_new(r: u32, t: u32, ) -> Weight { - (46_522_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((145_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn tip(t: u32, ) -> Weight { - (33_790_000 as Weight) - .saturating_add((713_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn close_tip(t: u32, ) -> Weight { - (113_040_000 as Weight) - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - - } - fn propose_bounty(d: u32, ) -> Weight { - (60_887_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - - } - fn approve_bounty() -> Weight { - (17_337_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn propose_curator() -> Weight { - (14_068_000 as Weight) - 
.saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn unassign_curator() -> Weight { - (49_717_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn accept_curator() -> Weight { - (50_596_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn award_bounty() -> Weight { - (36_030_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn claim_bounty() -> Weight { - (167_088_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - - } - fn close_bounty_proposed() -> Weight { - (48_977_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - - } - fn close_bounty_active() -> Weight { - (110_959_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - - } - fn extend_bounty_expiry() -> Weight { - (34_987_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn on_initialize_proposals(p: u32, ) -> Weight { - (76_596_000 as Weight) - .saturating_add((73_988_000 as Weight).saturating_mul(p as Weight)) + (86_038_000 as Weight) + // Standard Error: 18_000 + .saturating_add((78_781_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) } - fn on_initialize_bounties(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn propose_spend() -> Weight { - (56_844_000 as Weight) + (59_986_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn reject_proposal() -> Weight { - (46_098_000 as Weight) + (48_300_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn approve_proposal() -> Weight { - (13_622_000 as Weight) + (14_054_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn report_awesome(r: u32, ) -> Weight { - (71_823_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn retract_tip() -> Weight { - (60_150_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn tip_new(r: u32, t: u32, ) -> Weight { - 
(46_522_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((145_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn tip(t: u32, ) -> Weight { - (33_790_000 as Weight) - .saturating_add((713_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn close_tip(t: u32, ) -> Weight { - (113_040_000 as Weight) - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - - } - fn propose_bounty(d: u32, ) -> Weight { - (60_887_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - - } - fn approve_bounty() -> Weight { - (17_337_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn propose_curator() -> Weight { - (14_068_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn unassign_curator() -> Weight { - (49_717_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn accept_curator() -> Weight { - (50_596_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn award_bounty() -> Weight { - (36_030_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn claim_bounty() -> Weight { - (167_088_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) - - } - fn close_bounty_proposed() -> Weight { - (48_977_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - - } - fn close_bounty_active() -> Weight { - (110_959_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - - } - fn extend_bounty_expiry() -> Weight { - (34_987_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn on_initialize_proposals(p: u32, ) -> Weight { - (76_596_000 as Weight) - .saturating_add((73_988_000 as Weight).saturating_mul(p as Weight)) + (86_038_000 as Weight) + // Standard Error: 18_000 + .saturating_add((78_781_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) } - fn on_initialize_bounties(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as 
Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) - } - } diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index 098730aa30083..edb930231e176 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-utility" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,19 +14,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/utility/README.md b/frame/utility/README.md index 3963969291180..f7c0923cd5497 100644 --- a/frame/utility/README.md +++ b/frame/utility/README.md @@ -33,6 +33,6 @@ filtered by any proxy. * `as_derivative` - Dispatch a call from a derivative signed origin. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 413ed66ac8498..24de60215799f 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
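// The benchmarking changes below track the `Trait` -> `Config` rename: bounds and fully qualified paths are now written against `Config` (for example `<T as frame_system::Config>::Event` and `<T as Config>::Call` instead of the old `Trait`-based forms), and the `benchmarks!` macro no longer needs the empty `_ { }` common-parameters block.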
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,20 +25,18 @@ use frame_benchmarking::{benchmarks, account, whitelisted_caller}; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::Event) { let events = frame_system::Module::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } benchmarks! { - _ { } - batch { let c in 0 .. 1000; - let mut calls: Vec<::Call> = Vec::new(); + let mut calls: Vec<::Call> = Vec::new(); for i in 0 .. c { let call = frame_system::Call::remark(vec![]).into(); calls.push(call); @@ -59,7 +57,7 @@ benchmarks! { batch_all { let c in 0 .. 1000; - let mut calls: Vec<::Call> = Vec::new(); + let mut calls: Vec<::Call> = Vec::new(); for i in 0 .. c { let call = frame_system::Call::remark(vec![]).into(); calls.push(call); diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index e7ff09c8f0db1..28345e5ffe72d 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ //! # Utility Module //! A stateless module with helpers for dispatch management which does no re-authentication. //! -//! - [`utility::Trait`](./trait.Trait.html) +//! - [`utility::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -50,7 +50,7 @@ //! * `as_derivative` - Dispatch a call from a derivative signed origin. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -74,9 +74,9 @@ use sp_runtime::{DispatchError, traits::Dispatchable}; pub use weights::WeightInfo; /// Configuration trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From + Into<::Event>; + type Event: From + Into<::Event>; /// The overarching call type. type Call: Parameter + Dispatchable @@ -88,7 +88,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as Utility {} + trait Store for Module as Utility {} } decl_event! { @@ -111,7 +111,7 @@ impl TypeId for IndexedUtilityModuleId { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// Deposit one of this module's events by using the default implementation. fn deposit_event() = default; @@ -122,7 +122,7 @@ decl_module! { /// - `calls`: The calls to be dispatched from the same origin. /// /// If origin is root then call are dispatch without checking origin filter. (This includes - /// bypassing `frame_system::Trait::BaseCallFilter`). + /// bypassing `frame_system::Config::BaseCallFilter`). /// /// # /// - Complexity: O(C) where C is the number of calls to be batched. @@ -133,23 +133,25 @@ decl_module! { /// `BatchInterrupted` event is deposited, along with the number of successful calls made /// and the error of the failed call. 
If all were successful, then the `BatchCompleted` /// event is deposited. - #[weight = ( - calls.iter() - .map(|call| call.get_dispatch_info().weight) + #[weight = { + let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); + let dispatch_weight = dispatch_infos.iter() + .map(|di| di.weight) .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) - .saturating_add(T::WeightInfo::batch(calls.len() as u32)), - { - let all_operational = calls.iter() - .map(|call| call.get_dispatch_info().class) + .saturating_add(T::WeightInfo::batch(calls.len() as u32)); + let dispatch_class = { + let all_operational = dispatch_infos.iter() + .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } - }, - )] - fn batch(origin, calls: Vec<::Call>) -> DispatchResultWithPostInfo { + }; + (dispatch_weight, dispatch_class) + }] + fn batch(origin, calls: Vec<::Call>) -> DispatchResultWithPostInfo { let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); // Track the actual weight of each of the batch calls. @@ -190,14 +192,17 @@ decl_module! { /// NOTE: Prior to version *12, this was called `as_limited_sub`. /// /// The dispatch origin for this call must be _Signed_. - #[weight = ( - T::WeightInfo::as_derivative() - .saturating_add(call.get_dispatch_info().weight) - // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)), - call.get_dispatch_info().class, - )] - fn as_derivative(origin, index: u16, call: Box<::Call>) -> DispatchResultWithPostInfo { + #[weight = { + let dispatch_info = call.get_dispatch_info(); + ( + T::WeightInfo::as_derivative() + .saturating_add(dispatch_info.weight) + // AccountData for inner call origin accountdata. + .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + dispatch_info.class, + ) + }] + fn as_derivative(origin, index: u16, call: Box<::Call>) -> DispatchResultWithPostInfo { let mut origin = origin; let who = ensure_signed(origin.clone())?; let pseudonym = Self::derivative_account_id(who, index); @@ -222,29 +227,31 @@ decl_module! { /// - `calls`: The calls to be dispatched from the same origin. /// /// If origin is root then call are dispatch without checking origin filter. (This includes - /// bypassing `frame_system::Trait::BaseCallFilter`). + /// bypassing `frame_system::Config::BaseCallFilter`). /// /// # /// - Complexity: O(C) where C is the number of calls to be batched. 
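// Weight accounting for `batch_all`, as computed by the `#[weight = { .. }]` expression that follows: the final weight is the saturating sum of every inner call's weight plus the `T::WeightInfo::batch_all(calls.len())` base, and the dispatch class is `Operational` only when every inner call is `Operational`, otherwise `Normal`.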
/// # - #[weight = ( - calls.iter() - .map(|call| call.get_dispatch_info().weight) + #[weight = { + let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); + let dispatch_weight = dispatch_infos.iter() + .map(|di| di.weight) .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) - .saturating_add(T::WeightInfo::batch_all(calls.len() as u32)), - { - let all_operational = calls.iter() - .map(|call| call.get_dispatch_info().class) + .saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); + let dispatch_class = { + let all_operational = dispatch_infos.iter() + .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } - }, - )] + }; + (dispatch_weight, dispatch_class) + }] #[transactional] - fn batch_all(origin, calls: Vec<::Call>) -> DispatchResultWithPostInfo { + fn batch_all(origin, calls: Vec<::Call>) -> DispatchResultWithPostInfo { let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); // Track the actual weight of each of the batch calls. @@ -274,7 +281,7 @@ decl_module! { } } -impl Module { +impl Module { /// Derive a derivative account ID from the owner account and the sub-account index. pub fn derivative_account_id(who: T::AccountId, index: u16) -> T::AccountId { let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index a3c33bdf2081f..af31bbe96cbc4 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,25 +22,24 @@ use super::*; use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, impl_outer_dispatch, impl_outer_event, - assert_err_ignore_postinfo, + assert_ok, assert_noop, parameter_types, assert_err_ignore_postinfo, weights::{Weight, Pays}, dispatch::{DispatchError, DispatchErrorWithPostInfo, Dispatchable}, traits::Filter, storage, }; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use crate as utility; // example module to test behaviors. pub mod example { use super::*; use frame_support::dispatch::WithPostDispatchInfo; - pub trait Trait: frame_system::Trait { } + pub trait Config: frame_system::Config { } decl_module! { - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { #[weight = *weight] fn noop(_origin, weight: Weight) { } @@ -67,38 +66,32 @@ pub mod example { } } -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} -impl_outer_event! { - pub enum TestEvent for Test { - frame_system, - pallet_balances, - utility, - } -} -impl_outer_dispatch! 
{ - pub enum Call for Test where origin: Origin { - frame_system::System, - pallet_balances::Balances, - utility::Utility, - example::Example, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Utility: utility::{Module, Call, Event}, + Example: example::{Module, Call}, } -} +); -// For testing the pallet, we construct most of a mock runtime. This means -// first constructing a configuration type (`Test`) which `impl`s each of the -// configuration traits of pallets we want to use. -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = Weight::max_value(); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::max_value()); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = TestBaseCallFilter; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -108,30 +101,24 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type DustRemoval = (); - type Event = TestEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -142,13 +129,14 @@ parameter_types! { pub const MaxSignatories: u16 = 3; } -impl example::Trait for Test {} +impl example::Config for Test {} pub struct TestBaseCallFilter; impl Filter for TestBaseCallFilter { fn filter(c: &Call) -> bool { match *c { - Call::Balances(_) => true, + // Transfer works. Use `transfer_keep_alive` for a call that doesn't pass the filter. 
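// The `construct_runtime!` invocation above replaces the hand-written `struct Test` together with the `impl_outer_origin!`, `impl_outer_event!` and `impl_outer_dispatch!` macros; it also generates the `Origin`, `Call`, `Event` and `PalletInfo` types that the `frame_system::Config` impl now points at. The base call filter below whitelists only the plain `transfer` call, leaving `transfer_keep_alive` as a call the filter tests can rely on being rejected.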
+ Call::Balances(pallet_balances::Call::transfer(..)) => true, Call::Utility(_) => true, // For benchmarking, this acts as a noop call Call::System(frame_system::Call::remark(..)) => true, @@ -158,15 +146,11 @@ impl Filter for TestBaseCallFilter { } } } -impl Trait for Test { - type Event = TestEvent; +impl Config for Test { + type Event = Event; type Call = Call; type WeightInfo = (); } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Example = example::Module; -type Utility = Module; type ExampleCall = example::Call; type UtilityCall = crate::Call; @@ -185,11 +169,11 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -fn last_event() -> TestEvent { +fn last_event() -> Event { frame_system::Module::::events().pop().map(|e| e.event).expect("Event expected") } -fn expect_event>(e: E) { +fn expect_event>(e: E) { assert_eq!(last_event(), e.into()); } @@ -279,7 +263,7 @@ fn as_derivative_filters() { assert_err_ignore_postinfo!(Utility::as_derivative( Origin::signed(1), 1, - Box::new(Call::System(frame_system::Call::suicide())), + Box::new(Call::Balances(pallet_balances::Call::transfer_keep_alive(2, 1))), ), DispatchError::BadOrigin); }); } @@ -324,10 +308,10 @@ fn batch_with_signed_filters() { new_test_ext().execute_with(|| { assert_ok!( Utility::batch(Origin::signed(1), vec![ - Call::System(frame_system::Call::suicide()) + Call::Balances(pallet_balances::Call::transfer_keep_alive(2, 1)) ]), ); - expect_event(Event::BatchInterrupted(0, DispatchError::BadOrigin)); + expect_event(utility::Event::BatchInterrupted(0, DispatchError::BadOrigin)); }); } @@ -350,6 +334,7 @@ fn batch_early_exit_works() { #[test] fn batch_weight_calculation_doesnt_overflow() { + use sp_runtime::Perbill; new_test_ext().execute_with(|| { let big_call = Call::System(SystemCall::fill_block(Perbill::from_percent(50))); assert_eq!(big_call.get_dispatch_info().weight, Weight::max_value() / 2); @@ -400,7 +385,7 @@ fn batch_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); - expect_event(Event::BatchInterrupted(1, DispatchError::Other(""))); + expect_event(utility::Event::BatchInterrupted(1, DispatchError::Other(""))); // No weight is refunded assert_eq!(extract_actual_weight(&result, &info), info.weight); @@ -413,7 +398,7 @@ fn batch_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); - expect_event(Event::BatchInterrupted(1, DispatchError::Other(""))); + expect_event(utility::Event::BatchInterrupted(1, DispatchError::Other(""))); assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); // Partial batch completion @@ -424,11 +409,11 @@ fn batch_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); - expect_event(Event::BatchInterrupted(1, DispatchError::Other(""))); + expect_event(utility::Event::BatchInterrupted(1, DispatchError::Other(""))); assert_eq!( extract_actual_weight(&result, &info), // Real weight is 2 calls at end_weight - ::WeightInfo::batch(2) + end_weight * 2, + ::WeightInfo::batch(2) + end_weight * 2, ); }); } @@ -465,7 +450,7 @@ fn batch_all_revert() { ]), DispatchErrorWithPostInfo { post_info: PostDispatchInfo { - actual_weight: Some(::WeightInfo::batch_all(2) + info.weight * 2), + actual_weight: Some(::WeightInfo::batch_all(2) + info.weight * 2), pays_fee: Pays::Yes }, error: 
pallet_balances::Error::::InsufficientBalance.into() @@ -536,7 +521,7 @@ fn batch_all_handles_weight_refund() { assert_eq!( extract_actual_weight(&result, &info), // Real weight is 2 calls at end_weight - ::WeightInfo::batch_all(2) + end_weight * 2, + ::WeightInfo::batch_all(2) + end_weight * 2, ); }); } diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs index 73e4e3b1d93b9..5e2eb39f6ef54 100644 --- a/frame/utility/src/weights.rs +++ b/frame/utility/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -51,7 +51,7 @@ pub trait WeightInfo { /// Weights for pallet_utility using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn batch(c: u32, ) -> Weight { (20_071_000 as Weight) .saturating_add((2_739_000 as Weight).saturating_mul(c as Weight)) diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index bea64c2b4f94d..dc42fbcbab108 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-vesting" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,19 +14,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +pallet-balances = { version = "3.0.0", path = "../balances" } +sp-storage = { version = "3.0.0", path = "../../primitives/storage" } hex-literal = "0.3.1" [features] diff --git a/frame/vesting/README.md b/frame/vesting/README.md index 921fa94a1a2a9..811b0dc44152d 100644 --- a/frame/vesting/README.md +++ b/frame/vesting/README.md @@ -26,6 +26,6 @@ This module implements 
the `VestingSchedule` trait. "vested" so far. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 652d10aab3ae4..f65011050422b 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,9 +29,9 @@ use crate::Module as Vesting; const SEED: u32 = 0; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -fn add_locks(who: &T::AccountId, n: u8) { +fn add_locks(who: &T::AccountId, n: u8) { for id in 0..n { let lock_id = [id; 8]; let locked = 100u32; @@ -40,7 +40,7 @@ fn add_locks(who: &T::AccountId, n: u8) { } } -fn add_vesting_schedule(who: &T::AccountId) -> Result<(), &'static str> { +fn add_vesting_schedule(who: &T::AccountId) -> Result<(), &'static str> { let locked = 100u32; let per_block = 10u32; let starting_block = 1u32; @@ -58,8 +58,6 @@ fn add_vesting_schedule(who: &T::AccountId) -> Result<(), &'static str } benchmarks! { - _ { } - vest_locked { let l in 0 .. MaxLocksOf::::get(); diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 8b78eac4fedf8..9cf9166b37c0c 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ //! # Vesting Module //! -//! - [`vesting::Trait`](./trait.Trait.html) +//! - [`vesting::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -43,7 +43,7 @@ //! "vested" so far. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html #![cfg_attr(not(feature = "std"), no_std)] @@ -64,12 +64,12 @@ use frame_support::traits::{ use frame_system::{ensure_signed, ensure_root}; pub use weights::WeightInfo; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type MaxLocksOf = <::Currency as LockableCurrency<::AccountId>>::MaxLocks; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type MaxLocksOf = <::Currency as LockableCurrency<::AccountId>>::MaxLocks; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The currency trait. type Currency: LockableCurrency; @@ -120,7 +120,7 @@ impl< } decl_storage! { - trait Store for Module as Vesting { + trait Store for Module as Vesting { /// Information regarding the vesting of a given account. pub Vesting get(fn vesting): map hasher(blake2_128_concat) T::AccountId @@ -156,7 +156,7 @@ decl_storage! { } decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { + pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { /// The amount vested has been updated. This could indicate more funds are available. 
The /// balance given is the amount which is left unvested (and thus locked). /// \[account, unvested\] @@ -168,7 +168,7 @@ decl_event!( decl_error! { /// Error for the vesting module. - pub enum Error for Module { + pub enum Error for Module { /// The account given is not vesting. NotVesting, /// An existing vesting schedule already exists for this account that cannot be clobbered. @@ -180,7 +180,7 @@ decl_error! { decl_module! { /// Vesting module declaration. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// The minimum amount to be transferred to create a new vesting schedule. @@ -309,7 +309,7 @@ decl_module! { } } -impl Module { +impl Module { /// (Re)set or remove the module's currency lock on `who`'s account in accordance with their /// current unvested amount. fn update_lock(who: T::AccountId) -> DispatchResult { @@ -330,7 +330,7 @@ impl Module { } } -impl VestingSchedule for Module where +impl VestingSchedule for Module where BalanceOf: MaybeSerializeDeserialize + Debug { type Moment = T::BlockNumber; @@ -389,66 +389,67 @@ impl VestingSchedule for Module where #[cfg(test)] mod tests { use super::*; + use crate as pallet_vesting; - use std::cell::RefCell; - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - traits::Get - }; + use frame_support::{assert_ok, assert_noop, parameter_types}; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup, Identity, BadOrigin}, }; use frame_system::RawOrigin; - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + Vesting: pallet_vesting::{Module, Call, Storage, Event, Config}, + } + ); - #[derive(Clone, Eq, PartialEq)] - pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } parameter_types! { pub const MaxLocks: u32 = 10; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type Event = (); + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type MaxLocks = MaxLocks; @@ -456,25 +457,15 @@ mod tests { } parameter_types! { pub const MinVestedTransfer: u64 = 256 * 2; + pub static ExistentialDeposit: u64 = 0; } - impl Trait for Test { - type Event = (); + impl Config for Test { + type Event = Event; type Currency = Balances; type BlockNumberToBalance = Identity; type MinVestedTransfer = MinVestedTransfer; type WeightInfo = (); } - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Vesting = Module; - - thread_local! { - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); - } - pub struct ExistentialDeposit; - impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } - } pub struct ExtBuilder { existential_deposit: u64, @@ -503,7 +494,7 @@ mod tests { (12, 10 * self.existential_deposit) ], }.assimilate_storage(&mut t).unwrap(); - GenesisConfig:: { + pallet_vesting::GenesisConfig:: { vesting: vec![ (1, 0, 10, 5 * self.existential_deposit), (2, 10, 20, 0), diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs index 23a46ec763d89..f4a1ee3669101 100644 --- a/frame/vesting/src/weights.rs +++ b/frame/vesting/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -54,7 +54,7 @@ pub trait WeightInfo { /// Weights for pallet_vesting using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn vest_locked(l: u32, ) -> Weight { (57_472_000 as Weight) .saturating_add((155_000 as Weight).saturating_mul(l as Weight)) diff --git a/primitives/allocator/Cargo.toml b/primitives/allocator/Cargo.toml index 93991a4aeb2ab..1c38cbbb9c26e 100644 --- a/primitives/allocator/Cargo.toml +++ b/primitives/allocator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-allocator" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,11 +14,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0", path = "../std", default-features = false } -sp-core = { version = "2.0.0", path = "../core", default-features = false } -sp-wasm-interface = { version = "2.0.0", path = "../wasm-interface", default-features = false } -log = { version = "0.4.8", optional = true } -derive_more = { version = "0.99.2", optional = true } +sp-std = { version = "3.0.0", path = "../std", default-features = false } +sp-core = { version = "3.0.0", path = "../core", default-features = false } +sp-wasm-interface = { version = "3.0.0", path = "../wasm-interface", default-features = false } +log = { version = "0.4.11", optional = true } +thiserror = { version = "1.0.21", optional = true } [features] default = [ "std" ] @@ -27,5 +27,5 @@ std = [ "sp-core/std", "sp-wasm-interface/std", "log", - "derive_more", + "thiserror", ] diff --git a/primitives/allocator/README.md b/primitives/allocator/README.md index 361feaae591f9..cd845e2b028eb 100644 --- a/primitives/allocator/README.md +++ b/primitives/allocator/README.md @@ -1,6 +1,6 @@ Collection of allocator implementations. This crate provides the following allocator implementations: -- A freeing-bump allocator: [`FreeingBumpHeapAllocator`](freeing_bump::FreeingBumpHeapAllocator) +- A freeing-bump allocator: [`FreeingBumpHeapAllocator`](https://docs.rs/sp-allocator/latest/sp_allocator/struct.FreeingBumpHeapAllocator.html) License: Apache-2.0 \ No newline at end of file diff --git a/primitives/allocator/src/error.rs b/primitives/allocator/src/error.rs index 7b634af4d5b29..8464cd225d00e 100644 --- a/primitives/allocator/src/error.rs +++ b/primitives/allocator/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,23 +17,15 @@ /// The error type used by the allocators. #[derive(sp_core::RuntimeDebug)] -#[cfg_attr(feature = "std", derive(derive_more::Display))] +#[cfg_attr(feature = "std", derive(thiserror::Error))] pub enum Error { /// Someone tried to allocate more memory than the allowed maximum per allocation. - #[cfg_attr(feature = "std", display(fmt="Requested allocation size is too large"))] + #[cfg_attr(feature = "std", error("Requested allocation size is too large"))] RequestedAllocationTooLarge, /// Allocator run out of space. - #[cfg_attr(feature = "std", display(fmt="Allocator ran out of space"))] + #[cfg_attr(feature = "std", error("Allocator ran out of space"))] AllocatorOutOfSpace, /// Some other error occurred. 
+ #[cfg_attr(feature = "std", error("Other: {0}"))] Other(&'static str) } - -#[cfg(feature = "std")] -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - _ => None, - } - } -} diff --git a/primitives/allocator/src/freeing_bump.rs b/primitives/allocator/src/freeing_bump.rs index 19d7866e1b53b..14746c8784f8d 100644 --- a/primitives/allocator/src/freeing_bump.rs +++ b/primitives/allocator/src/freeing_bump.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/allocator/src/lib.rs b/primitives/allocator/src/lib.rs index b7cfce8048354..7d45fb5f368c7 100644 --- a/primitives/allocator/src/lib.rs +++ b/primitives/allocator/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index a3c480e92135f..20987035ef2f0 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,14 +13,15 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-api-proc-macro = { version = "2.0.0", path = "proc-macro" } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-version = { version = "2.0.0", default-features = false, path = "../version" } -sp-state-machine = { version = "0.8.0", optional = true, path = "../../primitives/state-machine" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +sp-api-proc-macro = { version = "3.0.0", path = "proc-macro" } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } +sp-version = { version = "3.0.0", default-features = false, path = "../version" } +sp-state-machine = { version = "0.9.0", optional = true, path = "../state-machine" } hash-db = { version = "0.15.2", optional = true } +thiserror = { version = "1.0.21", optional = true } [dev-dependencies] sp-test-primitives = { version = "2.0.0", path = "../test-primitives" } @@ -35,4 +36,5 @@ std = [ "sp-state-machine", "sp-version/std", "hash-db", + "thiserror", ] diff --git a/primitives/api/README.md b/primitives/api/README.md index 551de2f82e365..1cf9437373c77 100644 --- a/primitives/api/README.md +++ b/primitives/api/README.md @@ -3,8 +3,8 @@ Substrate runtime api The Substrate runtime api is the crucial interface between the node and the runtime. Every call that goes into the runtime is done with a runtime api. The runtime apis are not fixed. 
Every Substrate user can define its own apis with -[`decl_runtime_apis`](macro.decl_runtime_apis.html) and implement them in -the runtime with [`impl_runtime_apis`](macro.impl_runtime_apis.html). +[`decl_runtime_apis`](https://docs.rs/sp-api/latest/sp_api/macro.decl_runtime_apis.html) and implement them in +the runtime with [`impl_runtime_apis`](https://docs.rs/sp-api/latest/sp_api/macro.impl_runtime_apis.html). Every Substrate runtime needs to implement the [`Core`] runtime api. This api provides the basic functionality that every runtime needs to export. diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index 9b1661cf5ef60..450ce64b2b6c1 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api-proc-macro" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -18,7 +18,7 @@ proc-macro = true [dependencies] quote = "1.0.3" -syn = { version = "1.0.8", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "1.0.58", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.6" blake2-rfc = { version = "0.2.18", default-features = false } proc-macro-crate = "0.1.4" diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index a628ade6f9b47..ed5f33ef603e5 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -194,7 +194,7 @@ fn generate_native_call_generators(decl: &ItemTrait) -> Result { ::decode_with_depth_limit( #crate_::MAX_EXTRINSIC_DEPTH, &mut &#crate_::Encode::encode(input)[..], - ).map_err(|e| format!("{} {}", error_desc, e.what())) + ).map_err(|e| format!("{} {}", error_desc, e)) } )); @@ -409,7 +409,6 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { at: &#crate_::BlockId, args: Vec, changes: &std::cell::RefCell<#crate_::OverlayedChanges>, - offchain_changes: &std::cell::RefCell<#crate_::OffchainOverlayedChanges>, storage_transaction_cache: &std::cell::RefCell< #crate_::StorageTransactionCache >, @@ -439,7 +438,6 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { native_call: None, arguments: args, overlayed_changes: changes, - offchain_changes, storage_transaction_cache, initialize_block, context, @@ -460,7 +458,6 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { native_call, arguments: args, overlayed_changes: changes, - offchain_changes, storage_transaction_cache, initialize_block, context, @@ -708,13 +705,7 @@ impl<'a> ToClientSideDecl<'a> { }, #crate_::NativeOrEncoded::Encoded(r) => { <#ret_type as #crate_::Decode>::decode(&mut &r[..]) - .map_err(|err| - format!( - "Failed to decode result of `{}`: {}", - #function_name, - err.what(), - ).into() - ) + .map_err(|err| { #crate_::ApiError::new(#function_name, err).into() }) } } ) diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 85f5a1797b1e3..f8d7c74b9738b 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -1,6 +1,6 @@ // This file is part of 
Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -86,7 +86,7 @@ fn generate_impl_call( &#input, ) { Ok(res) => res, - Err(e) => panic!("Bad input data provided to {}: {}", #fn_name_str, e.what()), + Err(e) => panic!("Bad input data provided to {}: {}", #fn_name_str, e), }; #[allow(deprecated)] @@ -208,7 +208,6 @@ fn generate_runtime_api_base_structures() -> Result { commit_on_success: std::cell::RefCell, initialized_block: std::cell::RefCell>>, changes: std::cell::RefCell<#crate_::OverlayedChanges>, - offchain_changes: std::cell::RefCell<#crate_::OffchainOverlayedChanges>, storage_transaction_cache: std::cell::RefCell< #crate_::StorageTransactionCache >, @@ -338,7 +337,6 @@ fn generate_runtime_api_base_structures() -> Result { commit_on_success: true.into(), initialized_block: None.into(), changes: Default::default(), - offchain_changes: Default::default(), recorder: Default::default(), storage_transaction_cache: Default::default(), }.into() @@ -357,7 +355,6 @@ fn generate_runtime_api_base_structures() -> Result { &C, &Self, &std::cell::RefCell<#crate_::OverlayedChanges>, - &std::cell::RefCell<#crate_::OffchainOverlayedChanges>, &std::cell::RefCell<#crate_::StorageTransactionCache>, &std::cell::RefCell>>, &Option<#crate_::ProofRecorder>, @@ -374,7 +371,6 @@ fn generate_runtime_api_base_structures() -> Result { &self.call, self, &self.changes, - &self.offchain_changes, &self.storage_transaction_cache, &self.initialized_block, &self.recorder, @@ -531,7 +527,6 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { call_runtime_at, core_api, changes, - offchain_changes, storage_transaction_cache, initialized_block, recorder @@ -542,7 +537,6 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { at, params_encoded, changes, - offchain_changes, storage_transaction_cache, initialized_block, params.map(|p| { diff --git a/primitives/api/proc-macro/src/lib.rs b/primitives/api/proc-macro/src/lib.rs index 4dd48094683d9..30767efd41c11 100644 --- a/primitives/api/proc-macro/src/lib.rs +++ b/primitives/api/proc-macro/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 3e2fd42951b3c..c6ff98c0f1dc5 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -69,7 +69,9 @@ fn implement_common_api_traits( ) -> Result { let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - let error_type = error_type.map(|e| quote!(#e)).unwrap_or_else(|| quote!(String)); + let error_type = error_type + .map(|e| quote!(#e)) + .unwrap_or_else(|| quote!( #crate_::ApiError ) ); // Quote using the span from `error_type` to generate nice error messages when the type is // not implementing a trait or similar. 
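Several hunks in this diff follow the same error-handling migration: `derive_more::Display` plus a hand-written `std::error::Error` impl is dropped in favour of `thiserror`, kept behind the `std` feature so the crates still build for `no_std`/Wasm, and stringly-typed errors give way to structured types such as the new `ApiError`. A minimal sketch of the gated-`thiserror` pattern, using a hypothetical `Error` enum rather than the actual sp-allocator type:

```rust
// Sketch only: a hypothetical error enum using the same cfg-gated `thiserror`
// pattern as the sp-allocator hunk above. With the `std` feature disabled the
// derive and the `#[error]` attributes disappear, so the enum stays `no_std`.
#[derive(Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum Error {
	/// Someone asked for more memory than a single allocation may take.
	#[cfg_attr(feature = "std", error("Requested allocation size is too large"))]
	RequestedAllocationTooLarge,
	/// Any other failure, carrying a static description.
	#[cfg_attr(feature = "std", error("Other: {0}"))]
	Other(&'static str),
}
```

On the Cargo side this matches the manifests above: `thiserror` is declared as an optional dependency and only enabled from the `std` feature list, replacing the old `derive_more` entry.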
diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 534ddcfddd96e..dbe7c723af0b6 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 9dadce3b55452..8ce447c0d3667 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -47,8 +47,6 @@ pub use sp_core::NativeOrEncoded; #[doc(hidden)] #[cfg(feature = "std")] pub use hash_db::Hasher; -#[cfg(feature = "std")] -pub use sp_core::offchain::storage::OffchainOverlayedChanges; #[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; @@ -74,6 +72,7 @@ use sp_core::OpaqueMetadata; #[cfg(feature = "std")] use std::{panic::UnwindSafe, cell::RefCell}; + /// Maximum nesting level for extrinsics. pub const MAX_EXTRINSIC_DEPTH: u32 = 256; @@ -288,7 +287,7 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// /// Sets the error type that is being used by the mock implementation. /// /// The error type is used by all runtime apis. It is only required to /// /// be specified in one trait implementation. -/// type Error = String; +/// type Error = sp_api::ApiError; /// /// fn build_block() -> Block { /// unimplemented!("Not Required in tests") @@ -315,6 +314,7 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// # use sp_runtime::{traits::Block as BlockT, generic::BlockId}; /// # use sp_test_primitives::Block; /// # use sp_core::NativeOrEncoded; +/// # use codec; /// # /// # sp_api::decl_runtime_apis! { /// # /// Declare the api trait. @@ -331,15 +331,15 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// /// sp_api::mock_impl_runtime_apis! { /// impl Balance for MockApi { -/// type Error = String; +/// type Error = sp_api::ApiError; /// #[advanced] -/// fn get_balance(&self, at: &BlockId) -> Result, String> { +/// fn get_balance(&self, at: &BlockId) -> Result, Self::Error> { /// println!("Being called at: {}", at); /// /// Ok(self.balance.into()) /// } /// #[advanced] -/// fn set_balance(at: &BlockId, val: u64) -> Result, String> { +/// fn set_balance(at: &BlockId, val: u64) -> Result, Self::Error> { /// if let BlockId::Number(1) = at { /// println!("Being called to set balance to: {}", val); /// } @@ -392,12 +392,42 @@ pub trait ConstructRuntimeApi> { fn construct_runtime_api<'a>(call: &'a C) -> ApiRef<'a, Self::RuntimeApi>; } +/// An error describing which API call failed. 
+#[cfg_attr(feature = "std", derive(Debug, thiserror::Error, Eq, PartialEq))] +#[cfg_attr(feature = "std", error("Failed to execute API call {tag}"))] +#[cfg(feature = "std")] +pub struct ApiError { + tag: &'static str, + #[source] + error: codec::Error, +} + +#[cfg(feature = "std")] +impl From<(&'static str, codec::Error)> for ApiError { + fn from((tag, error): (&'static str, codec::Error)) -> Self { + Self { + tag, + error, + } + } +} + +#[cfg(feature = "std")] +impl ApiError { + pub fn new(tag: &'static str, error: codec::Error) -> Self { + Self { + tag, + error, + } + } +} + /// Extends the runtime api traits with an associated error type. This trait is given as super /// trait to every runtime api trait. #[cfg(feature = "std")] pub trait ApiErrorExt { /// Error type used by the runtime apis. - type Error: std::fmt::Debug + From; + type Error: std::fmt::Debug + From; } /// Extends the runtime api implementation with some common functionality. @@ -489,8 +519,6 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend, /// The overlayed changes that are on top of the state. pub overlayed_changes: &'a RefCell, - /// The overlayed changes to be applied to the offchain worker database. - pub offchain_changes: &'a RefCell, /// The cache for storage transactions. pub storage_transaction_cache: &'a RefCell>, /// Determines if the function requires that `initialize_block` should be called before calling @@ -506,7 +534,7 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend { /// Error type used by the implementation. - type Error: std::fmt::Debug + From; + type Error: std::fmt::Debug + From; /// The state backend that is used to store the block states. type StateBackend: StateBackend>; diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 867cdd6e57e48..e8f06aaf20e1b 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api-test" -version = "2.0.0" +version = "2.0.1" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,22 +12,22 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0", path = "../" } +sp-api = { version = "3.0.0", path = "../" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sp-version = { version = "2.0.0", path = "../../version" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } -sp-blockchain = { version = "2.0.0", path = "../../blockchain" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } -codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -trybuild = "1.0.17" +sp-version = { version = "3.0.0", path = "../../version" } +sp-runtime = { version = "3.0.0", path = "../../runtime" } +sp-blockchain = { version = "3.0.0", path = "../../blockchain" } +sp-consensus = { version = "0.9.0", path = "../../consensus/common" } +sc-block-builder = { version = "0.9.0", path = "../../../client/block-builder" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sp-state-machine = { version = "0.9.0", path = "../../state-machine" } +trybuild = "1.0.38" rustversion = "1.0.0" [dev-dependencies] criterion = "0.3.0" substrate-test-runtime-client = { version = 
"2.0.0", path = "../../../test-utils/runtime/client" } -sp-core = { version = "2.0.0", path = "../../core" } +sp-core = { version = "3.0.0", path = "../../core" } [[bench]] name = "bench" diff --git a/primitives/api/test/benches/bench.rs b/primitives/api/test/benches/bench.rs index 280b707902873..20ddbbe7116dc 100644 --- a/primitives/api/test/benches/bench.rs +++ b/primitives/api/test/benches/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs index 594882baf1e34..134ee5085658a 100644 --- a/primitives/api/test/tests/decl_and_impl.rs +++ b/primitives/api/test/tests/decl_and_impl.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,6 +17,7 @@ use sp_api::{ RuntimeApiInfo, decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, + ApiError, ApiExt, }; use sp_runtime::{traits::{GetNodeBlockType, Block as BlockT}, generic::BlockId}; @@ -103,17 +104,27 @@ mock_impl_runtime_apis! { } #[advanced] - fn same_name(_: &BlockId) -> std::result::Result, String> { + fn same_name(_: &BlockId) -> + std::result::Result< + NativeOrEncoded<()>, + ApiError + > + { Ok(().into()) } #[advanced] - fn wild_card(at: &BlockId, _: u32) -> std::result::Result, String> { + fn wild_card(at: &BlockId, _: u32) -> + std::result::Result< + NativeOrEncoded<()>, + ApiError + > + { if let BlockId::Number(1337) = at { // yeah Ok(().into()) } else { - Err("Ohh noooo".into()) + Err(ApiError::new("MockApi", codec::Error::from("Ohh noooo"))) } } } @@ -197,5 +208,8 @@ fn mock_runtime_api_works_with_advanced() { Api::::same_name(&mock, &BlockId::Number(0)).unwrap(); mock.wild_card(&BlockId::Number(1337), 1).unwrap(); - assert_eq!(String::from("Ohh noooo"), mock.wild_card(&BlockId::Number(1336), 1).unwrap_err()); + assert_eq!( + ApiError::new("MockApi", ::codec::Error::from("Ohh noooo")), + mock.wild_card(&BlockId::Number(1336), 1).unwrap_err() + ); } diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index d72872959cefa..ec1a86d8379fc 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/api/test/tests/trybuild.rs b/primitives/api/test/tests/trybuild.rs index 2f7fd6d06bcd3..5a6025f463af0 100644 --- a/primitives/api/test/tests/trybuild.rs +++ b/primitives/api/test/tests/trybuild.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ use std::env; #[test] fn ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
- env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 851d2b8a4b652..fcda69533e3ad 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -23,8 +23,8 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr 17 | sp_api::impl_runtime_apis! { | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found struct `std::string::String` | - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId<__SR_API_BLOCK__>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId<__SR_API_BLOCK__>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> std::result::Result<_, _>` + found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> std::result::Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs index 1e71730cd0a17..fd654ffdc63d6 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs @@ -1,4 +1,5 @@ use substrate_test_runtime_client::runtime::Block; +use sp_api::ApiError; sp_api::decl_runtime_apis! { pub trait Api { @@ -11,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! { impl Api for MockApi { #[advanced] - fn test(&self, _: BlockId) -> Result, String> { + fn test(&self, _: BlockId) -> Result, ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr index efddce05f51b4..47cd9e01d910f 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr @@ -1,13 +1,13 @@ error: `BlockId` needs to be taken by reference and not by value! - --> $DIR/mock_advanced_block_id_by_value.rs:11:1 + --> $DIR/mock_advanced_block_id_by_value.rs:12:1 | -11 | / sp_api::mock_impl_runtime_apis! { -12 | | impl Api for MockApi { -13 | | #[advanced] -14 | | fn test(&self, _: BlockId) -> Result, String> { +12 | / sp_api::mock_impl_runtime_apis! { +13 | | impl Api for MockApi { +14 | | #[advanced] +15 | | fn test(&self, _: BlockId) -> Result, ApiError> { ... 
| -17 | | } -18 | | } +18 | | } +19 | | } | |_^ | = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs index 407ea90ee882d..a15ef133fa6c4 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs @@ -1,4 +1,5 @@ use substrate_test_runtime_client::runtime::Block; +use sp_api::ApiError; sp_api::decl_runtime_apis! { pub trait Api { @@ -11,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! { impl Api for MockApi { #[advanced] - fn test(&self) -> Result, String> { + fn test(&self) -> Result, ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr index e7a66ebc5dba8..87d3660316b1e 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr @@ -1,5 +1,5 @@ error: If using the `advanced` attribute, it is required that the function takes at least one argument, the `BlockId`. - --> $DIR/mock_advanced_missing_blockid.rs:14:3 + --> $DIR/mock_advanced_missing_blockid.rs:15:3 | -14 | fn test(&self) -> Result, String> { +15 | fn test(&self) -> Result, ApiError> { | ^^ diff --git a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr index daac5674d6ffe..ab5b90af3ad1b 100644 --- a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr +++ b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr @@ -10,20 +10,20 @@ error: First error type was declared here. 17 | type Error = u32; | ^^^ -error[E0277]: the trait bound `u32: std::convert::From` is not satisfied +error[E0277]: the trait bound `u32: From` is not satisfied --> $DIR/mock_only_one_error_type.rs:17:16 | 17 | type Error = u32; - | ^^^ the trait `std::convert::From` is not implemented for `u32` + | ^^^ the trait `From` is not implemented for `u32` | ::: $WORKSPACE/primitives/api/src/lib.rs | - | type Error: std::fmt::Debug + From; - | ------------ required by this bound in `sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ApiErrorExt` + | type Error: std::fmt::Debug + From; + | -------------- required by this bound in `sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ApiErrorExt::Error` | = help: the following implementations were found: - > - > - > - > + > + > + > + > and 18 others diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index ed5b64144a6f6..73cf936103798 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -24,8 +24,8 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr 12 | sp_api::mock_impl_runtime_apis! 
{ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `()` | - = note: expected fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<()>, std::vec::Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> std::result::Result<_, _>` + found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> std::result::Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for trait @@ -42,6 +42,6 @@ error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for t 12 | sp_api::mock_impl_runtime_apis! { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `()` | - = note: expected fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<()>, std::vec::Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> std::result::Result<_, _>` + found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> std::result::Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index c3e4850036090..71f12b415a2b5 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -23,8 +23,8 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr 17 | sp_api::impl_runtime_apis! 
{ | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `&u64` | - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId<__SR_API_BLOCK__>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId<__SR_API_BLOCK__>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<&u64>, std::vec::Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> std::result::Result<_, _>` + found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<&u64>, Vec<_>) -> std::result::Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index 2ab6823759572..fff289e9a1d86 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-application-crypto" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" description = "Provides facilities for generating application specific crypto wrapper types." @@ -15,11 +15,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-io = { version = "3.0.0", default-features = false, path = "../io" } [features] default = [ "std" ] diff --git a/primitives/application-crypto/src/ecdsa.rs b/primitives/application-crypto/src/ecdsa.rs index 287ac8f3afcff..fe54dab39eef8 100644 --- a/primitives/application-crypto/src/ecdsa.rs +++ b/primitives/application-crypto/src/ecdsa.rs @@ -1,18 +1,19 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Ecdsa crypto types. diff --git a/primitives/application-crypto/src/ed25519.rs b/primitives/application-crypto/src/ed25519.rs index e761745cf5425..98eb4727df63e 100644 --- a/primitives/application-crypto/src/ed25519.rs +++ b/primitives/application-crypto/src/ed25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/application-crypto/src/lib.rs b/primitives/application-crypto/src/lib.rs index 12e11d690541a..d085d961a1026 100644 --- a/primitives/application-crypto/src/lib.rs +++ b/primitives/application-crypto/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/application-crypto/src/sr25519.rs b/primitives/application-crypto/src/sr25519.rs index 4700e0f756717..f3ce867858339 100644 --- a/primitives/application-crypto/src/sr25519.rs +++ b/primitives/application-crypto/src/sr25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/application-crypto/src/traits.rs b/primitives/application-crypto/src/traits.rs index f06e194aefddf..8daa866af63ed 100644 --- a/primitives/application-crypto/src/traits.rs +++ b/primitives/application-crypto/src/traits.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/application-crypto/test/Cargo.toml b/primitives/application-crypto/test/Cargo.toml index 0e7fdc7559ca1..92a2ea8f3b8c8 100644 --- a/primitives/application-crypto/test/Cargo.toml +++ b/primitives/application-crypto/test/Cargo.toml @@ -13,9 +13,9 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } -sp-keystore = { version = "0.8.0", path = "../../keystore" } +sp-core = { version = "3.0.0", default-features = false, path = "../../core" } +sp-keystore = { version = "0.9.0", path = "../../keystore", default-features = false } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } -sp-api = { version = "2.0.0", path = "../../api" } -sp-application-crypto = { version = "2.0.0", path = "../" } +sp-runtime = { version = "3.0.0", path = "../../runtime" } +sp-api = { version = "3.0.0", path = "../../api" } +sp-application-crypto = { version = "3.0.0", path = "../" } diff --git a/primitives/application-crypto/test/src/ecdsa.rs b/primitives/application-crypto/test/src/ecdsa.rs index 89def7cd68770..5ad10e79ef96f 100644 --- a/primitives/application-crypto/test/src/ecdsa.rs +++ b/primitives/application-crypto/test/src/ecdsa.rs @@ -1,18 +1,19 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Integration tests for ecdsa use std::sync::Arc; diff --git a/primitives/application-crypto/test/src/ed25519.rs b/primitives/application-crypto/test/src/ed25519.rs index 9df198dc4f9d2..06b962f1902bc 100644 --- a/primitives/application-crypto/test/src/ed25519.rs +++ b/primitives/application-crypto/test/src/ed25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/application-crypto/test/src/lib.rs b/primitives/application-crypto/test/src/lib.rs index b78539239691a..bee926f8dd8c1 100644 --- a/primitives/application-crypto/test/src/lib.rs +++ b/primitives/application-crypto/test/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/application-crypto/test/src/sr25519.rs b/primitives/application-crypto/test/src/sr25519.rs index f96d7b7ef0006..889f662b68140 100644 --- a/primitives/application-crypto/test/src/sr25519.rs +++ b/primitives/application-crypto/test/src/sr25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index b8e482491a7d4..76751cdee81bd 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-arithmetic" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-debug-derive = { version = "2.0.0", default-features = false, path = "../../primitives/debug-derive" } +sp-debug-derive = { version = "3.0.0", default-features = false, path = "../debug-derive" } [dev-dependencies] rand = "0.7.2" criterion = "0.3" serde_json = "1.0" -primitive-types = "0.7.0" +primitive-types = "0.9.0" [features] default = ["std"] diff --git a/primitives/arithmetic/benches/bench.rs b/primitives/arithmetic/benches/bench.rs index 7a576c8af144b..fd535c1d2d0ff 100644 --- a/primitives/arithmetic/benches/bench.rs +++ b/primitives/arithmetic/benches/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index 6a28142f9e825..2666dde9016a8 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -14,9 +14,9 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-arithmetic = { version = "2.0.0", path = ".." } +sp-arithmetic = { version = "3.0.0", path = ".." 
} honggfuzz = "0.5.49" -primitive-types = "0.7.0" +primitive-types = "0.9.0" num-bigint = "0.2" num-traits = "0.2" diff --git a/primitives/arithmetic/fuzzer/src/biguint.rs b/primitives/arithmetic/fuzzer/src/biguint.rs index 481ac5561dda2..57be7f5342043 100644 --- a/primitives/arithmetic/fuzzer/src/biguint.rs +++ b/primitives/arithmetic/fuzzer/src/biguint.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/arithmetic/fuzzer/src/fixed_point.rs b/primitives/arithmetic/fuzzer/src/fixed_point.rs index 9a88197ac32ad..db415ecb84c75 100644 --- a/primitives/arithmetic/fuzzer/src/fixed_point.rs +++ b/primitives/arithmetic/fuzzer/src/fixed_point.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs b/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs index 5d06df3f1f8a2..40f315ce755d1 100644 --- a/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs +++ b/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/arithmetic/fuzzer/src/normalize.rs b/primitives/arithmetic/fuzzer/src/normalize.rs index 3c1759d568523..48d52ba71bab6 100644 --- a/primitives/arithmetic/fuzzer/src/normalize.rs +++ b/primitives/arithmetic/fuzzer/src/normalize.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs index 8ddbd0c6d59d9..ff172b8bd2704 100644 --- a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs +++ b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/arithmetic/src/biguint.rs b/primitives/arithmetic/src/biguint.rs index 03f2bb1e55f6f..210cba8e2b1fe 100644 --- a/primitives/arithmetic/src/biguint.rs +++ b/primitives/arithmetic/src/biguint.rs @@ -1,13 +1,13 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -212,9 +212,9 @@ impl BigUint { let mut needs_borrow = false; let mut t = 0; - if let Some(v) = u.checked_sub(v) { - if let Some(v2) = v.checked_sub(k) { - t = v2 % B; + if let Some(v1) = u.checked_sub(v) { + if let Some(v2) = v1.checked_sub(k) { + t = v2; k = 0; } else { needs_borrow = true; @@ -228,9 +228,9 @@ impl BigUint { } t }; - // PROOF: t either comes from `v2 % B`, or from `u + B - v - k`. The former is + // PROOF: t either comes from `v2`, or from `u + B - v - k`. The former is // trivial. The latter will not overflow this branch will only happen if the sum of - // `u - v - k` part has been negative, hence `u + B - v - k < b`. + // `u - v - k` part has been negative, hence `u + B - v - k < B`. w.set(j, s as Single); } @@ -287,9 +287,9 @@ impl BigUint { let mut out = Self::with_capacity(n); let mut r: Single = 0; // PROOF: (B-1) * B + (B-1) still fits in double - let with_r = |x: Double, r: Single| { Double::from(r) * B + x }; + let with_r = |x: Single, r: Single| { Double::from(r) * B + Double::from(x) }; for d in (0..n).rev() { - let (q, rr) = div_single(with_r(self.get(d).into(), r), other) ; + let (q, rr) = div_single(with_r(self.get(d), r), other) ; out.set(d, q as Single); r = rr; } @@ -580,14 +580,25 @@ pub mod tests { BigUint { digits: vec![1; n] } } + #[test] + fn shift_check() { + let shift = sp_std::mem::size_of::() - sp_std::mem::size_of::(); + assert_eq!(shift * 8, SHIFT); + } + #[test] fn split_works() { let a = SHIFT / 2; let b = SHIFT * 3 / 2; let num: Double = 1 << a | 1 << b; - // example when `Single = u8` - // assert_eq!(num, 0b_0001_0000_0001_0000) + assert_eq!(num, 0x_0001_0000_0001_0000); assert_eq!(split(num), (1 << a, 1 << a)); + + let a = SHIFT / 2 + 4; + let b = SHIFT / 2 - 4; + let num: Double = 1 << (SHIFT + a) | 1 << b; + assert_eq!(num, 0x_0010_0000_0000_1000); + assert_eq!(split(num), (1 << a, 1 << b)); } #[test] @@ -734,6 +745,7 @@ pub mod tests { fn div_unit_works() { let a = BigUint { digits: vec![100] }; let b = BigUint { digits: vec![1, 100] }; + let c = BigUint { digits: vec![14, 28, 100] }; assert_eq!(a.clone().div_unit(1), a); assert_eq!(a.clone().div_unit(0), a); @@ -745,5 +757,9 @@ pub mod tests { assert_eq!(b.clone().div_unit(2), BigUint::from(((B + 100) / 2) as Single)); assert_eq!(b.clone().div_unit(7), BigUint::from(((B + 100) / 7) as Single)); + assert_eq!(c.clone().div_unit(1), c); + assert_eq!(c.clone().div_unit(0), c); + assert_eq!(c.clone().div_unit(2), BigUint { digits: vec![7, 14, 50] }); + assert_eq!(c.clone().div_unit(7), BigUint { digits: vec![2, 4, 14] }); } } diff --git a/primitives/arithmetic/src/fixed_point.rs b/primitives/arithmetic/src/fixed_point.rs index 8b882666946d3..44a869561070e 100644 --- a/primitives/arithmetic/src/fixed_point.rs +++ b/primitives/arithmetic/src/fixed_point.rs @@ -1,18 +1,19 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Decimal Fixed Point implementations for Substrate runtime. diff --git a/primitives/arithmetic/src/helpers_128bit.rs b/primitives/arithmetic/src/helpers_128bit.rs index 1e332f54d3bd8..a979b6a48aa2a 100644 --- a/primitives/arithmetic/src/helpers_128bit.rs +++ b/primitives/arithmetic/src/helpers_128bit.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index f6521988c91a5..9b1e8711da8c6 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -114,19 +114,16 @@ impl_normalize_for_numeric!(u8, u16, u32, u64, u128); impl Normalizable

for Vec

{ fn normalize(&self, targeted_sum: P) -> Result, &'static str> { - let inners = self - .iter() - .map(|p| p.clone().deconstruct().into()) - .collect::>(); - - let normalized = normalize(inners.as_ref(), targeted_sum.deconstruct().into())?; - - Ok( - normalized - .into_iter() - .map(|i: UpperOf

| P::from_parts(i.saturated_into())) - .collect() - ) + let uppers = + self.iter().map(|p| >::from(p.clone().deconstruct())).collect::>(); + + let normalized = + normalize(uppers.as_ref(), >::from(targeted_sum.deconstruct()))?; + + Ok(normalized + .into_iter() + .map(|i: UpperOf

`. A user of this crate may statically assert that this can never happen and safely /// `expect` this to return `Ok`. -pub fn phragmms( +pub fn phragmms( to_elect: usize, initial_candidates: Vec, initial_voters: Vec<(AccountId, VoteWeight, Vec)>, balancing_config: Option<(usize, ExtendedBalance)>, -) -> Result, &'static str> - where ExtendedBalance: From> -{ +) -> Result, &'static str> { let (candidates, mut voters) = setup_inputs(initial_candidates, initial_voters); let mut winners = vec![]; @@ -88,7 +86,7 @@ pub fn phragmms( pub(crate) fn calculate_max_score( candidates: &[CandidatePtr], voters: &[Voter], -) -> Option> where ExtendedBalance: From> { +) -> Option> { for c_ptr in candidates.iter() { let mut candidate = c_ptr.borrow_mut(); if !candidate.elected { diff --git a/primitives/npos-elections/src/reduce.rs b/primitives/npos-elections/src/reduce.rs index 17d7dd1290f7d..a34f1612ca1a5 100644 --- a/primitives/npos-elections/src/reduce.rs +++ b/primitives/npos-elections/src/reduce.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,7 +45,7 @@ //! //! ### Resources: //! -//! 1. https://hackmd.io/JOn9x98iS0e0DPWQ87zGWg?view +//! 1. use crate::node::{Node, NodeId, NodeRef, NodeRole}; use crate::{ExtendedBalance, IdentifierT, StakedAssignment}; diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index dc7a1a5fdfb97..edfea038ebc50 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,14 +17,13 @@ //! Tests for npos-elections. 
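The reworked `Normalizable` impl above lifts each per-thing's parts into its `UpperOf` type, runs `normalize` there, and converts the results back into parts. The sketch below only shows the contract such a `normalize` provides, scaling a list of weights so it sums exactly to a target; it is a simplified stand-in, not the sp-arithmetic algorithm, which distributes the correction differently.

```rust
// Simplified illustration of "normalize to a target sum".
fn normalize_sketch(input: &[u32], targeted_sum: u32) -> Vec<u32> {
    let sum: u64 = input.iter().map(|x| u64::from(*x)).sum();
    if sum == 0 || input.is_empty() {
        return input.to_vec();
    }
    // Scale each entry proportionally, flooring...
    let mut out: Vec<u32> = input
        .iter()
        .map(|x| (u64::from(*x) * u64::from(targeted_sum) / sum) as u32)
        .collect();
    // ...then park the rounding shortfall on the last entry.
    let shortfall = targeted_sum - out.iter().sum::<u32>();
    if let Some(last) = out.last_mut() {
        *last += shortfall;
    }
    out
}

fn main() {
    // Three Perbill-like parts that should add up to exactly 100.
    let normalized = normalize_sketch(&[33, 33, 33], 100);
    assert_eq!(normalized, vec![33, 33, 34]);
    assert_eq!(normalized.iter().sum::<u32>(), 100);
}
```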
-use crate::mock::*; use crate::{ - seq_phragmen, balancing, build_support_map, is_score_better, helpers::*, - Support, StakedAssignment, Assignment, ElectionResult, ExtendedBalance, setup_inputs, - seq_phragmen_core, Voter, + balancing, helpers::*, is_score_better, mock::*, seq_phragmen, seq_phragmen_core, setup_inputs, + to_support_map, to_supports, Assignment, ElectionResult, ExtendedBalance, StakedAssignment, + Support, Voter, EvaluateSupport, }; +use sp_arithmetic::{PerU16, Perbill, Percent, Permill}; use substrate_test_utils::assert_eq_uvec; -use sp_arithmetic::{Perbill, Permill, Percent, PerU16}; #[test] fn float_phragmen_poc_works() { @@ -53,22 +52,62 @@ fn float_phragmen_poc_works() { assert_eq!( support_map.get(&2).unwrap(), - &_Support { own: 0.0, total: 25.0, others: vec![(10u64, 10.0), (30u64, 15.0)]} + &_Support { own: 0.0, total: 25.0, others: vec![(10u64, 10.0), (30u64, 15.0)] } ); assert_eq!( support_map.get(&3).unwrap(), - &_Support { own: 0.0, total: 35.0, others: vec![(20u64, 20.0), (30u64, 15.0)]} + &_Support { own: 0.0, total: 35.0, others: vec![(20u64, 20.0), (30u64, 15.0)] } ); equalize_float(phragmen_result.assignments, &mut support_map, 0.0, 2, stake_of); assert_eq!( support_map.get(&2).unwrap(), - &_Support { own: 0.0, total: 30.0, others: vec![(10u64, 10.0), (30u64, 20.0)]} + &_Support { own: 0.0, total: 30.0, others: vec![(10u64, 10.0), (30u64, 20.0)] } ); assert_eq!( support_map.get(&3).unwrap(), - &_Support { own: 0.0, total: 30.0, others: vec![(20u64, 20.0), (30u64, 10.0)]} + &_Support { own: 0.0, total: 30.0, others: vec![(20u64, 20.0), (30u64, 10.0)] } + ); +} + +#[test] +fn phragmen_core_test_without_edges() { + let candidates = vec![1, 2, 3]; + let voters = vec![ + (10, 10, vec![]), + (20, 20, vec![]), + (30, 30, vec![]), + ]; + + let (candidates, voters) = setup_inputs(candidates, voters); + + assert_eq!( + voters + .iter() + .map(|v| ( + v.who, + v.budget, + (v.edges.iter().map(|e| (e.who, e.weight)).collect::>()), + )) + .collect::>(), + vec![] + ); + + assert_eq!( + candidates + .iter() + .map(|c_ptr| ( + c_ptr.borrow().who, + c_ptr.borrow().elected, + c_ptr.borrow().round, + c_ptr.borrow().backed_stake, + )).collect::>(), + vec![ + (1, false, 0, 0), + (2, false, 0, 0), + (3, false, 0, 0), + ] ); } @@ -260,7 +299,7 @@ fn phragmen_poc_works() { let staked = assignment_ratio_to_staked(assignments, &stake_of); let winners = to_without_backing(winners); - let support_map = build_support_map::(&winners, &staked).unwrap(); + let support_map = to_support_map::(&winners, &staked).unwrap(); assert_eq_uvec!( staked, @@ -334,7 +373,7 @@ fn phragmen_poc_works_with_balancing() { let staked = assignment_ratio_to_staked(assignments, &stake_of); let winners = to_without_backing(winners); - let support_map = build_support_map::(&winners, &staked).unwrap(); + let support_map = to_support_map::(&winners, &staked).unwrap(); assert_eq_uvec!( staked, @@ -726,7 +765,7 @@ fn phragmen_self_votes_should_be_kept() { let staked_assignments = assignment_ratio_to_staked(result.assignments, &stake_of); let winners = to_without_backing(result.winners); - let supports = build_support_map::(&winners, &staked_assignments).unwrap(); + let supports = to_support_map::(&winners, &staked_assignments).unwrap(); assert_eq!(supports.get(&5u64), None); assert_eq!( @@ -799,6 +838,34 @@ fn duplicate_target_is_ignored_when_winner() { ); } +#[test] +fn support_map_and_vec_can_be_evaluated() { + let candidates = vec![1, 2, 3]; + let voters = vec![(10, vec![1, 2]), (20, vec![1, 3]), (30, vec![2, 
3])]; + + let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30)]); + let ElectionResult { + winners, + assignments, + } = seq_phragmen::<_, Perbill>( + 2, + candidates, + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + None, + ) + .unwrap(); + + let staked = assignment_ratio_to_staked(assignments, &stake_of); + let winners = to_without_backing(winners); + let support_map = to_support_map::(&winners, &staked).unwrap(); + let support_vec = to_supports(&winners, &staked).unwrap(); + + assert_eq!(support_map.evaluate(), support_vec.evaluate()); +} + mod assignment_convert_normalize { use super::*; #[test] @@ -1072,15 +1139,12 @@ mod score { } mod solution_type { - use codec::{Decode, Encode}; use super::AccountId; + use codec::{Decode, Encode}; // these need to come from the same dev-dependency `sp-npos-elections`, not from the crate. - use crate::{ - generate_solution_type, Assignment, - Error as PhragmenError, - }; - use sp_std::{convert::TryInto, fmt::Debug}; + use crate::{generate_solution_type, Assignment, CompactSolution, Error as PhragmenError}; use sp_arithmetic::Percent; + use sp_std::{convert::TryInto, fmt::Debug}; type TestAccuracy = Percent; @@ -1096,7 +1160,6 @@ mod solution_type { #[compact] struct InnerTestSolutionCompact::(12) ); - } #[test] @@ -1150,7 +1213,7 @@ mod solution_type { compact, Decode::decode(&mut &encoded[..]).unwrap(), ); - assert_eq!(compact.len(), 4); + assert_eq!(compact.voter_count(), 4); assert_eq!(compact.edge_count(), 2 + 4); assert_eq!(compact.unique_targets(), vec![10, 11, 20, 40, 50, 51]); } @@ -1286,7 +1349,7 @@ mod solution_type { ).unwrap(); // basically number of assignments that it is encoding. - assert_eq!(compacted.len(), assignments.len()); + assert_eq!(compacted.voter_count(), assignments.len()); assert_eq!( compacted.edge_count(), assignments.iter().fold(0, |a, b| a + b.distribution.len()), @@ -1370,9 +1433,12 @@ mod solution_type { ..Default::default() }; - assert_eq!(compact.unique_targets(), vec![1, 2, 3, 4, 7, 8, 11, 12, 13, 66, 67]); + assert_eq!( + compact.unique_targets(), + vec![1, 2, 3, 4, 7, 8, 11, 12, 13, 66, 67] + ); assert_eq!(compact.edge_count(), 2 + (2 * 2) + 3 + 16); - assert_eq!(compact.len(), 6); + assert_eq!(compact.voter_count(), 6); // this one has some duplicates. 
let compact = TestSolutionCompact { @@ -1389,7 +1455,7 @@ mod solution_type { assert_eq!(compact.unique_targets(), vec![1, 3, 4, 7, 8, 11, 13]); assert_eq!(compact.edge_count(), 2 + (2 * 2) + 3); - assert_eq!(compact.len(), 5); + assert_eq!(compact.voter_count(), 5); } #[test] diff --git a/primitives/offchain/Cargo.toml b/primitives/offchain/Cargo.toml index 02041d5c678ef..1e3d0a34b26b1 100644 --- a/primitives/offchain/Cargo.toml +++ b/primitives/offchain/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate offchain workers primitives" name = "sp-offchain" -version = "2.0.0" +version = "3.0.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,12 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } +sp-api = { version = "3.0.0", default-features = false, path = "../api" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } [dev-dependencies] -sp-state-machine = { version = "0.8.0", default-features = false, path = "../state-machine" } +sp-state-machine = { version = "0.9.0", default-features = false, path = "../state-machine" } [features] default = ["std"] diff --git a/primitives/offchain/src/lib.rs b/primitives/offchain/src/lib.rs index fa5ab808df8a1..ffdc2bfcc3a64 100644 --- a/primitives/offchain/src/lib.rs +++ b/primitives/offchain/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #![warn(missing_docs)] /// Re-export of parent module scope storage prefix. -pub use sp_core::offchain::STORAGE_PREFIX as STORAGE_PREFIX; +pub use sp_core::offchain::STORAGE_PREFIX; sp_api::decl_runtime_apis! { /// The offchain worker api. diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index acf454b960a7c..ad03baca24ebb 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-panic-handler" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,4 +15,3 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] backtrace = "0.3.38" -log = "0.4.8" diff --git a/primitives/panic-handler/src/lib.rs b/primitives/panic-handler/src/lib.rs index 2ac30dd636914..150ce52976807 100644 --- a/primitives/panic-handler/src/lib.rs +++ b/primitives/panic-handler/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
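The `len()` → `voter_count()` rename exercised above makes the counting convention explicit: a compact solution reports one entry per encoded voter, while `edge_count` totals the individual (voter, target) distributions. A small stand-in illustration follows; the types are hypothetical, not the generated solution struct.

```rust
/// Hypothetical stand-in for an assignment; not the real npos-elections type.
#[allow(dead_code)]
struct AssignmentSketch {
    who: u64,
    distribution: Vec<(u64, u32)>, // (target, weight)
}

// One entry per voter the solution encodes.
fn voter_count(assignments: &[AssignmentSketch]) -> usize {
    assignments.len()
}

// Every (voter, target) pair is one edge.
fn edge_count(assignments: &[AssignmentSketch]) -> usize {
    assignments.iter().map(|a| a.distribution.len()).sum()
}

fn main() {
    let assignments = vec![
        AssignmentSketch { who: 2, distribution: vec![(20, 100)] },
        AssignmentSketch { who: 4, distribution: vec![(40, 60), (10, 40)] },
    ];
    assert_eq!(voter_count(&assignments), 2);
    assert_eq!(edge_count(&assignments), 3);
}
```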
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index 0c9fe8ebd6667..de7e2bd882e73 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-rpc" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", features = ["derive"] } -sp-core = { version = "2.0.0", path = "../core" } +sp-core = { version = "3.0.0", path = "../core" } [dev-dependencies] serde_json = "1.0.41" diff --git a/primitives/rpc/src/lib.rs b/primitives/rpc/src/lib.rs index c479f0df8b60e..822aba4ba1969 100644 --- a/primitives/rpc/src/lib.rs +++ b/primitives/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/rpc/src/list.rs b/primitives/rpc/src/list.rs index a80d5a22272c8..1f4c6ff098c4d 100644 --- a/primitives/rpc/src/list.rs +++ b/primitives/rpc/src/list.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/rpc/src/number.rs b/primitives/rpc/src/number.rs index 3d7e74753526c..ad19b7f5b4367 100644 --- a/primitives/rpc/src/number.rs +++ b/primitives/rpc/src/number.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ //! A number type that can be serialized both as a number or a string that encodes a number in a //! string. -use std::{convert::TryFrom, fmt::Debug}; +use std::{convert::{TryFrom, TryInto}, fmt::Debug}; use serde::{Serialize, Deserialize}; use sp_core::U256; @@ -39,6 +39,12 @@ pub enum NumberOrHex { Hex(U256), } +impl Default for NumberOrHex { + fn default() -> Self { + Self::Number(Default::default()) + } +} + impl NumberOrHex { /// Converts this number into an U256. 
pub fn into_u256(self) -> U256 { @@ -49,12 +55,24 @@ impl NumberOrHex { } } +impl From for NumberOrHex { + fn from(n: u32) -> Self { + NumberOrHex::Number(n.into()) + } +} + impl From for NumberOrHex { fn from(n: u64) -> Self { NumberOrHex::Number(n) } } +impl From for NumberOrHex { + fn from(n: u128) -> Self { + NumberOrHex::Hex(n.into()) + } +} + impl From for NumberOrHex { fn from(n: U256) -> Self { NumberOrHex::Hex(n) @@ -66,25 +84,28 @@ pub struct TryFromIntError(pub(crate) ()); impl TryFrom for u32 { type Error = TryFromIntError; - fn try_from(num_or_hex: NumberOrHex) -> Result { - let num_or_hex = num_or_hex.into_u256(); - if num_or_hex > U256::from(u32::max_value()) { - return Err(TryFromIntError(())); - } else { - Ok(num_or_hex.as_u32()) - } + fn try_from(num_or_hex: NumberOrHex) -> Result { + num_or_hex.into_u256().try_into().map_err(|_| TryFromIntError(())) } } impl TryFrom for u64 { type Error = TryFromIntError; - fn try_from(num_or_hex: NumberOrHex) -> Result { - let num_or_hex = num_or_hex.into_u256(); - if num_or_hex > U256::from(u64::max_value()) { - return Err(TryFromIntError(())); - } else { - Ok(num_or_hex.as_u64()) - } + fn try_from(num_or_hex: NumberOrHex) -> Result { + num_or_hex.into_u256().try_into().map_err(|_| TryFromIntError(())) + } +} + +impl TryFrom for u128 { + type Error = TryFromIntError; + fn try_from(num_or_hex: NumberOrHex) -> Result { + num_or_hex.into_u256().try_into().map_err(|_| TryFromIntError(())) + } +} + +impl From for U256 { + fn from(num_or_hex: NumberOrHex) -> U256 { + num_or_hex.into_u256() } } diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index bc36098f05a54..c4eb084f685c4 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,23 +14,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-wasm-interface = { version = "2.0.0", path = "../wasm-interface", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-tracing = { version = "2.0.0", default-features = false, path = "../tracing" } -sp-runtime-interface-proc-macro = { version = "2.0.0", path = "proc-macro" } -sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +sp-wasm-interface = { version = "3.0.0", path = "../wasm-interface", default-features = false } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-tracing = { version = "3.0.0", default-features = false, path = "../tracing" } +sp-runtime-interface-proc-macro = { version = "3.0.0", path = "proc-macro" } +sp-externalities = { version = "0.9.0", optional = true, path = "../externalities" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } static_assertions = "1.0.0" -primitive-types = { version = "0.7.0", default-features = false } -sp-storage = { version = "2.0.0", default-features = false, path = "../storage" } +primitive-types = { version = "0.9.0", default-features = false } +sp-storage = { version = "3.0.0", default-features = false, path = "../storage" } +impl-trait-for-tuples = "0.2.1" [dev-dependencies] sp-runtime-interface-test-wasm = { version = "2.0.0", path = "test-wasm" } -sp-state-machine = { 
version = "0.8.0", path = "../../primitives/state-machine" } -sp-core = { version = "2.0.0", path = "../core" } -sp-io = { version = "2.0.0", path = "../io" } +sp-state-machine = { version = "0.9.0", path = "../state-machine" } +sp-core = { version = "3.0.0", path = "../core" } +sp-io = { version = "3.0.0", path = "../io" } rustversion = "1.0.0" -trybuild = "1.0.23" +trybuild = "1.0.38" [features] default = [ "std" ] diff --git a/primitives/runtime-interface/README.md b/primitives/runtime-interface/README.md index 666bfe4d5a861..49e13f1b2e743 100644 --- a/primitives/runtime-interface/README.md +++ b/primitives/runtime-interface/README.md @@ -7,18 +7,19 @@ maps to an external function call. These external functions are exported by the and they map to the same implementation as the native calls. # Using a type in a runtime interface - + Any type that should be used in a runtime interface as argument or return value needs to -implement [`RIType`]. The associated type [`FFIType`](RIType::FFIType) is the type that is used -in the FFI function to represent the actual type. For example `[T]` is represented by an `u64`. -The slice pointer and the length will be mapped to an `u64` value. For more information see -this [table](#ffi-type-and-conversion). The FFI function definition is used when calling from -the wasm runtime into the node. - -Traits are used to convert from a type to the corresponding [`RIType::FFIType`]. +implement [`RIType`]. The associated type [`FFIType`](https:/docs.rs/sp-runtime-interface/latest/sp_runtime_interface/trait.RIType.html#associatedtype.FFIType) +is the type that is used in the FFI function to represent the actual type. For example `[T]` is +represented by an `u64`. The slice pointer and the length will be mapped to an `u64` value. +For more information see this [table](https:/docs.rs/sp-runtime-interface/latest/sp_runtime_interface/#ffi-type-and-conversion). +The FFI function definition is used when calling from the wasm runtime into the node. + +Traits are used to convert from a type to the corresponding +[`RIType::FFIType`](https:/docs.rs/sp-runtime-interface/latest/sp_runtime_interface/trait.RIType.html#associatedtype.FFIType). Depending on where and how a type should be used in a function signature, a combination of the following traits need to be implemented: - + 1. Pass as function argument: [`wasm::IntoFFIValue`] and [`host::FromFFIValue`] 2. As function return value: [`wasm::FromFFIValue`] and [`host::IntoFFIValue`] 3. Pass as mutable function argument: [`host::IntoPreallocatedFFIValue`] @@ -26,7 +27,7 @@ following traits need to be implemented: The traits are implemented for most of the common types like `[T]`, `Vec`, arrays and primitive types. -For custom types, we provide the [`PassBy`](pass_by::PassBy) trait and strategies that define +For custom types, we provide the [`PassBy`](https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/pass_by#PassBy) trait and strategies that define how a type is passed between the wasm runtime and the node. Each strategy also provides a derive macro to simplify the implementation. @@ -52,7 +53,7 @@ trait RuntimeInterface { ``` For more information on declaring a runtime interface, see -[`#[runtime_interface]`](attr.runtime_interface.html). +[`#[runtime_interface]`](https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/attr.runtime_interface.html). # FFI type and conversion @@ -80,9 +81,9 @@ the host side and how they are converted into the corresponding type. 
| `[u8; N]` | `u32` | `v.as_ptr()` | | `*const T` | `u32` | `Identity` | | `Option` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 | e.as_ptr() 32bit | -| [`T where T: PassBy`](pass_by::Inner) | Depends on inner | Depends on inner | -| [`T where T: PassBy`](pass_by::Codec) | `u64`| v.len() 32bit << 32 | v.as_ptr() 32bit | +| [`T where T: PassBy`](https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/pass_by#Inner) | Depends on inner | Depends on inner | +| [`T where T: PassBy`](https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/pass_by#Codec) | `u64`| v.len() 32bit << 32 | v.as_ptr() 32bit | `Identity` means that the value is converted directly into the corresponding FFI type. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index 8358d2170575f..51732ac631810 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-proc-macro" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.5", features = ["full", "visit", "fold", "extra-traits"] } +syn = { version = "1.0.58", features = ["full", "visit", "fold", "extra-traits"] } quote = "1.0.3" proc-macro2 = "1.0.3" Inflector = "0.11.4" diff --git a/primitives/runtime-interface/proc-macro/src/lib.rs b/primitives/runtime-interface/proc-macro/src/lib.rs index df43551398a12..53df4e084d277 100644 --- a/primitives/runtime-interface/proc-macro/src/lib.rs +++ b/primitives/runtime-interface/proc-macro/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs b/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs index 5e51440938456..1e6b72f882339 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs b/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs index 35ed9c0cb802f..cc0428fc9b56b 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs b/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs index cf3bb965d0743..7fe0d1734c36c 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/mod.rs b/primitives/runtime-interface/proc-macro/src/pass_by/mod.rs index ff5ea4849af77..80ac3396759fb 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/mod.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs index 2725bd2c89ce5..c5d0073e3fb63 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index 7a4dbc5773a28..fb127b1941532 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs index 02c291975738c..78feda663850c 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs index 70015d02426d4..0e392b1a02fbf 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
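For context on the `pass_by` proc-macro files touched here: the sp-runtime-interface README above notes that each pass-by strategy comes with a derive macro. A sketch of the documented `Codec` strategy applied to a custom type follows; the struct and its fields are made up for the example.

```rust
use codec::{Decode, Encode};
use sp_runtime_interface::pass_by::PassByCodec;

/// Hypothetical type crossing the wasm <-> host boundary. With the `Codec`
/// strategy it is SCALE-encoded and handed over as a packed (ptr, len) `u64`.
#[derive(Encode, Decode, PassByCodec)]
pub struct TransferDetails {
    pub amount: u128,
    pub memo: Vec<u8>,
}
```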
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/proc-macro/src/utils.rs b/primitives/runtime-interface/proc-macro/src/utils.rs index 45f66e3bf6525..f4cef852076bf 100644 --- a/primitives/runtime-interface/proc-macro/src/utils.rs +++ b/primitives/runtime-interface/proc-macro/src/utils.rs @@ -1,18 +1,19 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Util function used by this crate. @@ -297,4 +298,4 @@ pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) } Ok(RuntimeInterface { items: functions }) -} \ No newline at end of file +} diff --git a/primitives/runtime-interface/src/host.rs b/primitives/runtime-interface/src/host.rs index 4a01291e68455..a6ea96af90043 100644 --- a/primitives/runtime-interface/src/host.rs +++ b/primitives/runtime-interface/src/host.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index da57cf086beef..4dd79aeccb39e 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -365,7 +365,9 @@ impl PassBy for Option { type PassBy = Codec; } -impl PassBy for (u32, u32, u32, u32) { +#[impl_trait_for_tuples::impl_for_tuples(30)] +#[tuple_types_no_default_trait_bound] +impl PassBy for Tuple where Self: codec::Codec { type PassBy = Codec; } diff --git a/primitives/runtime-interface/src/lib.rs b/primitives/runtime-interface/src/lib.rs index 7ff5f0d7a042d..93b4a8db87e9d 100644 --- a/primitives/runtime-interface/src/lib.rs +++ b/primitives/runtime-interface/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,16 +26,17 @@ //! # Using a type in a runtime interface //! //! Any type that should be used in a runtime interface as argument or return value needs to -//! implement [`RIType`]. The associated type [`FFIType`](RIType::FFIType) is the type that is used -//! in the FFI function to represent the actual type. For example `[T]` is represented by an `u64`. -//! The slice pointer and the length will be mapped to an `u64` value. For more information see -//! this [table](#ffi-type-and-conversion). The FFI function definition is used when calling from -//! the wasm runtime into the node. +//! implement [`RIType`]. The associated type [`FFIType`](./trait.RIType.html#associatedtype.FFIType) +//! is the type that is used in the FFI function to represent the actual type. For example `[T]` is +//! represented by an `u64`. The slice pointer and the length will be mapped to an `u64` value. +//! For more information see this [table](#ffi-type-and-conversion). +//! The FFI function definition is used when calling from the wasm runtime into the node. //! -//! Traits are used to convert from a type to the corresponding [`RIType::FFIType`]. +//! Traits are used to convert from a type to the corresponding +//! [`RIType::FFIType`](./trait.RIType.html#associatedtype.FFIType). //! Depending on where and how a type should be used in a function signature, a combination of the //! following traits need to be implemented: -//! +//! //! 1. Pass as function argument: [`wasm::IntoFFIValue`] and [`host::FromFFIValue`] //! 2. As function return value: [`wasm::FromFFIValue`] and [`host::IntoFFIValue`] //! 3. Pass as mutable function argument: [`host::IntoPreallocatedFFIValue`] @@ -43,7 +44,7 @@ //! The traits are implemented for most of the common types like `[T]`, `Vec`, arrays and //! primitive types. //! -//! For custom types, we provide the [`PassBy`](pass_by::PassBy) trait and strategies that define +//! For custom types, we provide the [`PassBy`](./pass_by#PassBy) trait and strategies that define //! how a type is passed between the wasm runtime and the node. Each strategy also provides a derive //! macro to simplify the implementation. //! @@ -69,7 +70,7 @@ //! ``` //! //! For more information on declaring a runtime interface, see -//! [`#[runtime_interface]`](attr.runtime_interface.html). +//! [`#[runtime_interface]`](./attr.runtime_interface.html). //! //! # FFI type and conversion //! @@ -97,8 +98,8 @@ //! | `[u8; N]` | `u32` | `v.as_ptr()` | //! | `*const T` | `u32` | `Identity` | //! | `Option` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 | e.as_ptr() 32bit | -//! | [`T where T: PassBy`](pass_by::Inner) | Depends on inner | Depends on inner | -//! | [`T where T: PassBy`](pass_by::Codec) | `u64`| v.len() 32bit << 32 | v.as_ptr() 32bit | +//! | [`T where T: PassBy`](./pass_by#Inner) | Depends on inner | Depends on inner | +//! | [`T where T: PassBy`](./pass_by#Codec)|`u64`|v.len() 32bit << 32 |v.as_ptr() 32bit| //! //! `Identity` means that the value is converted directly into the corresponding FFI type. diff --git a/primitives/runtime-interface/src/pass_by.rs b/primitives/runtime-interface/src/pass_by.rs index 5ccb3a5e96ee1..69485a1a2873f 100644 --- a/primitives/runtime-interface/src/pass_by.rs +++ b/primitives/runtime-interface/src/pass_by.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -238,7 +238,7 @@ impl PassByImpl for Codec { let (ptr, len) = unpack_ptr_and_len(arg); let vec = context.read_memory(Pointer::new(ptr), len)?; T::decode(&mut &vec[..]) - .map_err(|e| format!("Could not decode value from wasm: {}", e.what())) + .map_err(|e| format!("Could not decode value from wasm: {}", e)) } } diff --git a/primitives/runtime-interface/src/util.rs b/primitives/runtime-interface/src/util.rs index 604e37e8be397..5b3aa07e60d9f 100644 --- a/primitives/runtime-interface/src/util.rs +++ b/primitives/runtime-interface/src/util.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/src/wasm.rs b/primitives/runtime-interface/src/wasm.rs index 5511f60e30d21..387d6901e2f25 100644 --- a/primitives/runtime-interface/src/wasm.rs +++ b/primitives/runtime-interface/src/wasm.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
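Both FFI tables above describe the same convention for fat values: the length goes in the upper 32 bits of the `u64` and the pointer in the lower 32 bits. A minimal sketch of that packing follows; the helper names are invented for the example and are not the crate's API.

```rust
// Length in the high 32 bits, pointer in the low 32 bits, as in the tables above.
fn pack_ptr_and_len_sketch(ptr: u32, len: u32) -> u64 {
    (u64::from(len) << 32) | u64::from(ptr)
}

fn unpack_ptr_and_len_sketch(packed: u64) -> (u32, u32) {
    ((packed & 0xFFFF_FFFF) as u32, (packed >> 32) as u32)
}

fn main() {
    let (ptr, len) = (0x1000_u32, 42_u32);
    let packed = pack_ptr_and_len_sketch(ptr, len);
    assert_eq!(unpack_ptr_and_len_sketch(packed), (ptr, len));
}
```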
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index 59790eb172eb3..91febf68ed285 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -13,13 +13,13 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../" } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "3.0.0", default-features = false, path = "../" } +sp-std = { version = "3.0.0", default-features = false, path = "../../std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../io" } +sp-core = { version = "3.0.0", default-features = false, path = "../../core" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "4.0.0", path = "../../../utils/wasm-builder" } [features] default = [ "std" ] diff --git a/primitives/runtime-interface/test-wasm-deprecated/build.rs b/primitives/runtime-interface/test-wasm-deprecated/build.rs index 4f111bc993007..a1c4b2d892cfe 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/build.rs +++ b/primitives/runtime-interface/test-wasm-deprecated/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build() diff --git a/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs b/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs index 174cdb8cdf85a..0a7e2b49bbbbb 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs +++ b/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,8 +26,8 @@ use sp_runtime_interface::runtime_interface; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { WASM_BINARY.expect("Development wasm binary is not available. 
Testing is only \ supported with the flag disabled.") diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index 39c8df976a5ba..d0a61c5b920f4 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -13,13 +13,13 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../" } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "3.0.0", default-features = false, path = "../" } +sp-std = { version = "3.0.0", default-features = false, path = "../../std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../io" } +sp-core = { version = "3.0.0", default-features = false, path = "../../core" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "4.0.0", path = "../../../utils/wasm-builder" } [features] default = [ "std" ] diff --git a/primitives/runtime-interface/test-wasm/build.rs b/primitives/runtime-interface/test-wasm/build.rs index 4f111bc993007..a1c4b2d892cfe 100644 --- a/primitives/runtime-interface/test-wasm/build.rs +++ b/primitives/runtime-interface/test-wasm/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build() diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index 28895df2214d1..4cdf59349dd76 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,8 +30,8 @@ use sp_core::{sr25519::Public, wasm_export_functions}; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ supported with the flag disabled.") @@ -120,6 +120,16 @@ pub trait TestApi { fn test_versionning(&self, data: u32) -> bool { data == 42 } + + /// Returns the input values as tuple. 
+ fn return_input_as_tuple( + a: Vec, + b: u32, + c: Option>, + d: u8, + ) -> (Vec, u32, Option>, u8) { + (a, b, c, d) + } } /// This function is not used, but we require it for the compiler to include `sp-io`. @@ -258,4 +268,18 @@ wasm_export_functions! { assert!(!test_api::test_versionning(50)); assert!(!test_api::test_versionning(102)); } + + fn test_return_input_as_tuple() { + let a = vec![1, 3, 4, 5]; + let b = 10000; + let c = Some(vec![2, 3]); + let d = 5; + + let res = test_api::return_input_as_tuple(a.clone(), b, c.clone(), d); + + assert_eq!(a, res.0); + assert_eq!(b, res.1); + assert_eq!(c, res.2); + assert_eq!(d, res.3); + } } diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index d802f9cb6b39a..f25183f021227 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -12,13 +12,13 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0", path = "../" } -sc-executor = { version = "0.8.0", path = "../../../client/executor" } +sp-runtime-interface = { version = "3.0.0", path = "../" } +sc-executor = { version = "0.9.0", path = "../../../client/executor" } sp-runtime-interface-test-wasm = { version = "2.0.0", path = "../test-wasm" } sp-runtime-interface-test-wasm-deprecated = { version = "2.0.0", path = "../test-wasm-deprecated" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } -sp-core = { version = "2.0.0", path = "../../core" } -sp-io = { version = "2.0.0", path = "../../io" } -tracing = "0.1.19" +sp-state-machine = { version = "0.9.0", path = "../../state-machine" } +sp-runtime = { version = "3.0.0", path = "../../runtime" } +sp-core = { version = "3.0.0", path = "../../core" } +sp-io = { version = "3.0.0", path = "../../io" } +tracing = "0.1.22" tracing-core = "0.1.17" diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index c66609daa2f29..4426997663485 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -44,6 +44,7 @@ fn call_wasm_method_with_result( Some(8), host_functions, 8, + None, ); executor.call_in_wasm( binary, @@ -208,4 +209,9 @@ fn test_tracing() { let inner = subscriber.0.lock().unwrap(); assert!(inner.spans.contains("return_input_version_1")); -} \ No newline at end of file +} + +#[test] +fn test_return_input_as_tuple() { + call_wasm_method::(&wasm_binary_unwrap()[..], "test_return_input_as_tuple"); +} diff --git a/primitives/runtime-interface/tests/ui.rs b/primitives/runtime-interface/tests/ui.rs index 2f7fd6d06bcd3..5a6025f463af0 100644 --- a/primitives/runtime-interface/tests/ui.rs +++ b/primitives/runtime-interface/tests/ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
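The blanket `PassBy` impl for tuples (bounded by `Self: codec::Codec`) and the `return_input_as_tuple` test above work because SCALE already encodes tuples element by element. A minimal round-trip of a tuple of the same flavour, using only `parity-scale-codec`, is shown below as an illustration; the exact element types in the test may differ.

```rust
use codec::{Decode, Encode};

fn main() {
    // A tuple shaped like the one in the test above.
    let value: (Vec<u8>, u32, Option<Vec<u8>>, u8) =
        (vec![1, 3, 4, 5], 10_000, Some(vec![2, 3]), 5);

    // Encoded as one blob, which is what the Codec pass-by strategy hands across FFI.
    let encoded = value.encode();
    let decoded = <(Vec<u8>, u32, Option<Vec<u8>>, u8)>::decode(&mut &encoded[..]).unwrap();
    assert_eq!(value, decoded);
}
```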
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ use std::env; #[test] fn ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 6579a17c77fec..0e4f6168ba118 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,25 +16,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -sp-arithmetic = { version = "2.0.0", default-features = false, path = "../arithmetic" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../io" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../application-crypto" } +sp-arithmetic = { version = "3.0.0", default-features = false, path = "../arithmetic" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-io = { version = "3.0.0", default-features = false, path = "../io" } log = { version = "0.4.8", optional = true } -paste = "0.1.6" +paste = "1.0" rand = { version = "0.7.2", optional = true } -impl-trait-for-tuples = "0.1.3" -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } +impl-trait-for-tuples = "0.2.1" +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } hash256-std-hasher = { version = "0.15.2", default-features = false } either = { version = "1.5", default-features = false } [dev-dependencies] serde_json = "1.0.41" rand = "0.7.2" -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.9.0", path = "../state-machine" } [features] bench = [] @@ -50,7 +49,6 @@ std = [ "sp-std/std", "sp-io/std", "serde", - "sp-inherents/std", "parity-util-mem/std", "hash256-std-hasher/std", "either/use_std", diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index 27eb89a76947e..09ca9a9c46af1 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime/src/generic/block.rs b/primitives/runtime/src/generic/block.rs index 4a758b7416dec..7b2a10297f9c6 100644 --- a/primitives/runtime/src/generic/block.rs +++ b/primitives/runtime/src/generic/block.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime/src/generic/checked_extrinsic.rs b/primitives/runtime/src/generic/checked_extrinsic.rs index f355308a59f97..2c3392a133799 100644 --- a/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/primitives/runtime/src/generic/checked_extrinsic.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index ec0963e5ba002..16bd887f04748 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime/src/generic/era.rs b/primitives/runtime/src/generic/era.rs index 9bfab517a92ca..5bee170048b5f 100644 --- a/primitives/runtime/src/generic/era.rs +++ b/primitives/runtime/src/generic/era.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -107,13 +107,13 @@ impl Era { } impl Encode for Era { - fn encode_to(&self, output: &mut T) { + fn encode_to(&self, output: &mut T) { match self { Era::Immortal => output.push_byte(0), Era::Mortal(period, phase) => { let quantize_factor = (*period as u64 >> 12).max(1); let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 | ((phase / quantize_factor) << 4) as u16; - output.push(&encoded); + encoded.encode_to(output); } } } diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index e6c800e5787ff..62f9908fbe58d 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
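The `Era::encode_to` change above swaps `output.push(&encoded)` for `encoded.encode_to(output)` without altering the packing itself: the low 4 bits carry log2(period) clamped to 1..=15 and the high bits carry the quantized phase. A worked sketch of that packing follows; the values are chosen for the example, and `period` is assumed to already be a power of two, as `Era::mortal` normalises it.

```rust
// Reproduces the mortal-era packing outside the Encode impl, for readability.
fn encode_mortal_era_sketch(period: u64, phase: u64) -> u16 {
    let quantize_factor = (period >> 12).max(1);
    let log2_period = (period.trailing_zeros() - 1).max(1).min(15) as u16;
    log2_period | ((phase / quantize_factor) << 4) as u16
}

fn main() {
    // period = 64 blocks, phase = 42: trailing_zeros(64) - 1 = 5, factor = 1.
    assert_eq!(encode_mortal_era_sketch(64, 42), (42 << 4) | 5);
}
```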
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -106,12 +106,12 @@ impl Encode for Header where Hash: HashT, Hash::Output: Encode, { - fn encode_to(&self, dest: &mut T) { - dest.push(&self.parent_hash); - dest.push(&<<::Type as EncodeAsRef<_>>::RefType>::from(&self.number)); - dest.push(&self.state_root); - dest.push(&self.extrinsics_root); - dest.push(&self.digest); + fn encode_to(&self, dest: &mut T) { + self.parent_hash.encode_to(dest); + <<::Type as EncodeAsRef<_>>::RefType>::from(&self.number).encode_to(dest); + self.state_root.encode_to(dest); + self.extrinsics_root.encode_to(dest); + self.digest.encode_to(dest); } } diff --git a/primitives/runtime/src/generic/mod.rs b/primitives/runtime/src/generic/mod.rs index 2a25c063ead73..f5087eccab080 100644 --- a/primitives/runtime/src/generic/mod.rs +++ b/primitives/runtime/src/generic/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime/src/generic/tests.rs b/primitives/runtime/src/generic/tests.rs index 56138094fa024..ec31e7de48524 100644 --- a/primitives/runtime/src/generic/tests.rs +++ b/primitives/runtime/src/generic/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime/src/generic/unchecked_extrinsic.rs b/primitives/runtime/src/generic/unchecked_extrinsic.rs index ab9afdb28b602..5c87d2715509d 100644 --- a/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index e6c707e906edc..2fb4f7546d233 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -56,9 +56,13 @@ pub mod traits; pub mod transaction_validity; pub mod random_number_generator; mod runtime_string; +mod multiaddress; pub use crate::runtime_string::*; +// Re-export Multiaddress +pub use multiaddress::MultiAddress; + /// Re-export these since they're only "kind of" generic. pub use generic::{DigestItem, Digest}; @@ -407,6 +411,10 @@ pub enum DispatchError { #[cfg_attr(feature = "std", serde(skip_deserializing))] message: Option<&'static str>, }, + /// At least one consumer is remaining so the account cannot be destroyed. + ConsumerRemaining, + /// There are no providers so the account cannot be created. 
+ NoProviders, } /// Result of a `Dispatchable` which contains the `DispatchResult` and additional information about @@ -456,6 +464,15 @@ impl From for DispatchError { } } +impl From for DispatchError { + fn from(e: crate::traits::StoredMapError) -> Self { + match e { + crate::traits::StoredMapError::ConsumerRemaining => Self::ConsumerRemaining, + crate::traits::StoredMapError::NoProviders => Self::NoProviders, + } + } +} + impl From<&'static str> for DispatchError { fn from(err: &'static str) -> DispatchError { DispatchError::Other(err) @@ -466,9 +483,11 @@ impl From for &'static str { fn from(err: DispatchError) -> &'static str { match err { DispatchError::Other(msg) => msg, - DispatchError::CannotLookup => "Can not lookup", + DispatchError::CannotLookup => "Cannot lookup", DispatchError::BadOrigin => "Bad origin", DispatchError::Module { message, .. } => message.unwrap_or("Unknown module error"), + DispatchError::ConsumerRemaining => "Consumer remaining", + DispatchError::NoProviders => "No providers", } } } @@ -486,7 +505,7 @@ impl traits::Printable for DispatchError { "DispatchError".print(); match self { Self::Other(err) => err.print(), - Self::CannotLookup => "Can not lookup".print(), + Self::CannotLookup => "Cannot lookup".print(), Self::BadOrigin => "Bad origin".print(), Self::Module { index, error, message } => { index.print(); @@ -495,6 +514,8 @@ impl traits::Printable for DispatchError { msg.print(); } } + Self::ConsumerRemaining => "Consumer remaining".print(), + Self::NoProviders => "No providers".print(), } } } diff --git a/primitives/runtime/src/multiaddress.rs b/primitives/runtime/src/multiaddress.rs new file mode 100644 index 0000000000000..d09cd7acaf4db --- /dev/null +++ b/primitives/runtime/src/multiaddress.rs @@ -0,0 +1,66 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! MultiAddress type is a wrapper for multiple downstream account formats. + +use codec::{Encode, Decode}; +use sp_std::vec::Vec; + +/// A multi-format address wrapper for on-chain accounts. +#[derive(Encode, Decode, PartialEq, Eq, Clone, crate::RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Hash))] +pub enum MultiAddress { + /// It's an account ID (pubkey). + Id(AccountId), + /// It's an account index. + Index(#[codec(compact)] AccountIndex), + /// It's some arbitrary raw bytes. + Raw(Vec), + /// It's a 32 byte representation. + Address32([u8; 32]), + /// Its a 20 byte representation. 
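A quick usage sketch of the error plumbing added above, assuming the `sp_runtime` items land as shown in this diff (`DispatchError::ConsumerRemaining`/`NoProviders`, `traits::StoredMapError`, and the two `From` impls):

```rust
use sp_runtime::{traits::StoredMapError, DispatchError};

fn main() {
    // A `StoredMapError` from the account system maps onto the new variant...
    let e: DispatchError = StoredMapError::NoProviders.into();
    // ...which stringifies to the message introduced above.
    let msg: &'static str = e.into();
    assert_eq!(msg, "No providers");
}
```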
+ Address20([u8; 20]), +} + +#[cfg(feature = "std")] +impl std::fmt::Display for MultiAddress +where + AccountId: std::fmt::Debug, + AccountIndex: std::fmt::Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + use sp_core::hexdisplay::HexDisplay; + match self { + MultiAddress::Raw(inner) => write!(f, "MultiAddress::Raw({})", HexDisplay::from(inner)), + MultiAddress::Address32(inner) => write!(f, "MultiAddress::Address32({})", HexDisplay::from(inner)), + MultiAddress::Address20(inner) => write!(f, "MultiAddress::Address20({})", HexDisplay::from(inner)), + _ => write!(f, "{:?}", self), + } + } +} + +impl From for MultiAddress { + fn from(a: AccountId) -> Self { + MultiAddress::Id(a) + } +} + +impl Default for MultiAddress { + fn default() -> Self { + MultiAddress::Id(Default::default()) + } +} diff --git a/primitives/runtime/src/offchain/http.rs b/primitives/runtime/src/offchain/http.rs index 12a0fcf1e5b45..31eec32f6a315 100644 --- a/primitives/runtime/src/offchain/http.rs +++ b/primitives/runtime/src/offchain/http.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime/src/offchain/mod.rs b/primitives/runtime/src/offchain/mod.rs index fe5844ce3004b..c9d1eda0f8738 100644 --- a/primitives/runtime/src/offchain/mod.rs +++ b/primitives/runtime/src/offchain/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime/src/offchain/storage.rs b/primitives/runtime/src/offchain/storage.rs index 2f62d400c0b95..56bebf956c13f 100644 --- a/primitives/runtime/src/offchain/storage.rs +++ b/primitives/runtime/src/offchain/storage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -105,7 +105,6 @@ mod tests { use sp_io::TestExternalities; use sp_core::offchain::{ OffchainExt, - OffchainStorage, testing, }; @@ -125,7 +124,7 @@ mod tests { assert_eq!(val.get::(), Some(Some(15_u32))); assert_eq!(val.get::>(), Some(None)); assert_eq!( - state.read().persistent_storage.get(b"", b"testval"), + state.read().persistent_storage.get(b"testval"), Some(vec![15_u8, 0, 0, 0]) ); }) @@ -148,7 +147,7 @@ mod tests { assert_eq!(result, Ok(Ok(16_u32))); assert_eq!(val.get::(), Some(Some(16_u32))); assert_eq!( - state.read().persistent_storage.get(b"", b"testval"), + state.read().persistent_storage.get(b"testval"), Some(vec![16_u8, 0, 0, 0]) ); diff --git a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs index a3838f21fd13d..416689cadfb8b 100644 --- a/primitives/runtime/src/offchain/storage_lock.rs +++ b/primitives/runtime/src/offchain/storage_lock.rs @@ -1,18 +1,19 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
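Usage sketch for the new `MultiAddress` wrapper added above, assuming `sp_core::crypto::AccountId32` as the account type and `u32` as the index type; any pair of `Codec`-able types works:

```rust
use sp_core::crypto::AccountId32;
use sp_runtime::MultiAddress;

fn main() {
    // `From<AccountId>` wraps a plain account id into the `Id` variant.
    let id: AccountId32 = [0u8; 32].into();
    let addr: MultiAddress<AccountId32, u32> = id.clone().into();
    assert_eq!(addr, MultiAddress::Id(id));
}
```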
-// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! # Off-chain Storage Lock //! @@ -438,11 +439,11 @@ pub trait BlockNumberProvider { /// /// In case of using crate `sp_runtime` without the crate `frame` /// system, it is already implemented for - /// `frame_system::Module` as: + /// `frame_system::Module` as: /// /// ```ignore /// fn current_block_number() -> Self { - /// frame_system::Module::block_number() + /// frame_system::Module::block_number() /// } /// ``` /// . @@ -452,7 +453,7 @@ pub trait BlockNumberProvider { #[cfg(test)] mod tests { use super::*; - use sp_core::offchain::{testing, OffchainExt, OffchainStorage}; + use sp_core::offchain::{testing, OffchainExt}; use sp_io::TestExternalities; const VAL_1: u32 = 0u32; @@ -485,7 +486,7 @@ mod tests { } }); // lock must have been cleared at this point - assert_eq!(state.read().persistent_storage.get(b"", b"lock_1"), None); + assert_eq!(state.read().persistent_storage.get(b"lock_1"), None); } #[test] @@ -508,7 +509,7 @@ mod tests { guard.forget(); }); // lock must have been cleared at this point - let opt = state.read().persistent_storage.get(b"", b"lock_2"); + let opt = state.read().persistent_storage.get(b"lock_2"); assert!(opt.is_some()); } @@ -540,7 +541,7 @@ mod tests { }); // lock must have been cleared at this point - let opt = state.read().persistent_storage.get(b"", b"lock_3"); + let opt = state.read().persistent_storage.get(b"lock_3"); assert!(opt.is_some()); } @@ -587,7 +588,7 @@ mod tests { }); // lock must have been cleared at this point - let opt = state.read().persistent_storage.get(b"", b"lock_4"); + let opt = state.read().persistent_storage.get(b"lock_4"); assert_eq!(opt.unwrap(), vec![132_u8, 3u8, 0, 0, 0, 0, 0, 0]); // 132 + 256 * 3 = 900 } } diff --git a/primitives/runtime/src/random_number_generator.rs b/primitives/runtime/src/random_number_generator.rs index 23d0421742bd8..a4d1a66370c19 100644 --- a/primitives/runtime/src/random_number_generator.rs +++ b/primitives/runtime/src/random_number_generator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime/src/runtime_string.rs b/primitives/runtime/src/runtime_string.rs index 7fd38f48df638..e315de430c12d 100644 --- a/primitives/runtime/src/runtime_string.rs +++ b/primitives/runtime/src/runtime_string.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,6 +32,22 @@ pub enum RuntimeString { Owned(Vec), } +/// Convenience macro to use the format! interface to get a `RuntimeString::Owned` +#[macro_export] +macro_rules! format_runtime_string { + ($($args:tt)*) => {{ + #[cfg(feature = "std")] + { + sp_runtime::RuntimeString::Owned(format!($($args)*)) + } + #[cfg(not(feature = "std"))] + { + sp_runtime::RuntimeString::Owned(sp_std::alloc::format!($($args)*).as_bytes().to_vec()) + } + }}; +} + + impl From<&'static str> for RuntimeString { fn from(data: &'static str) -> Self { Self::Borrowed(data) diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index 97e128f363c89..b6d2641f01083 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -247,7 +247,7 @@ impl<'a, Xt> Deserialize<'a> for Block where Block: Decode { fn deserialize>(de: D) -> Result { let r = >::deserialize(de)?; Decode::decode(&mut &r[..]) - .map_err(|e| DeError::custom(format!("Invalid value passed into decode: {}", e.what()))) + .map_err(|e| DeError::custom(format!("Invalid value passed into decode: {}", e))) } } diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 4d2b1f062f716..128c9a6eed0a0 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -153,6 +153,25 @@ impl From for &'static str { } } +/// Error that can be returned by our impl of `StoredMap`. +#[derive(Encode, Decode, RuntimeDebug)] +pub enum StoredMapError { + /// Attempt to create map value when it is a consumer and there are no providers in place. + NoProviders, + /// Attempt to anull/remove value when it is the last provider and there is still at + /// least one consumer left. + ConsumerRemaining, +} + +impl From for &'static str { + fn from(e: StoredMapError) -> &'static str { + match e { + StoredMapError::NoProviders => "No providers", + StoredMapError::ConsumerRemaining => "Consumer remaining", + } + } +} + /// An error that indicates that a lookup failed. #[derive(Encode, Decode, RuntimeDebug)] pub struct LookupError; @@ -209,6 +228,44 @@ impl Lookup for IdentityLookup { fn lookup(&self, x: T) -> Result { Ok(x) } } +/// A lookup implementation returning the `AccountId` from a `MultiAddress`. 
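Usage sketch for the new `format_runtime_string!` macro, assuming it is invoked from a crate that depends on `sp_runtime` under that name (the macro body spells out the `sp_runtime::` and `sp_std::` paths) and that the `std` feature is enabled, so `Owned` wraps a `String`:

```rust
fn main() {
    let s = sp_runtime::format_runtime_string!("block {} not found", 42);
    match s {
        // With `std` enabled, the owned variant wraps the formatted `String`.
        sp_runtime::RuntimeString::Owned(inner) => assert_eq!(inner, "block 42 not found"),
        _ => unreachable!("the macro always returns the Owned variant"),
    }
}
```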
+pub struct AccountIdLookup(PhantomData<(AccountId, AccountIndex)>); +impl StaticLookup for AccountIdLookup +where + AccountId: Codec + Clone + PartialEq + Debug, + AccountIndex: Codec + Clone + PartialEq + Debug, + crate::MultiAddress: Codec, +{ + type Source = crate::MultiAddress; + type Target = AccountId; + fn lookup(x: Self::Source) -> Result { + match x { + crate::MultiAddress::Id(i) => Ok(i), + _ => Err(LookupError), + } + } + fn unlookup(x: Self::Target) -> Self::Source { + crate::MultiAddress::Id(x) + } +} + +/// Perform a StaticLookup where there are multiple lookup sources of the same type. +impl StaticLookup for (A, B) +where + A: StaticLookup, + B: StaticLookup, +{ + type Source = A::Source; + type Target = A::Target; + + fn lookup(x: Self::Source) -> Result { + A::lookup(x.clone()).or_else(|_| B::lookup(x)) + } + fn unlookup(x: Self::Target) -> Self::Source { + A::unlookup(x) + } +} + /// Extensible conversion trait. Generic over both source and destination types. pub trait Convert { /// Make conversion. @@ -655,7 +712,7 @@ pub trait Dispatchable { /// identifier for the caller. The origin can be empty in the case of an inherent extrinsic. type Origin; /// ... - type Trait; + type Config; /// An opaque set of information attached to the transaction. This could be constructed anywhere /// down the line in a runtime. The current Substrate runtime uses a struct with the same name /// to represent the dispatch class and weight. @@ -674,7 +731,7 @@ pub type PostDispatchInfoOf = ::PostInfo; impl Dispatchable for () { type Origin = (); - type Trait = (); + type Config = (); type Info = (); type PostInfo = (); fn dispatch(self, _origin: Self::Origin) -> crate::DispatchResultWithInfo { diff --git a/primitives/runtime/src/transaction_validity.rs b/primitives/runtime/src/transaction_validity.rs index 2191e59b9bb26..b0c3e4dd031cc 100644 --- a/primitives/runtime/src/transaction_validity.rs +++ b/primitives/runtime/src/transaction_validity.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -54,6 +54,13 @@ pub enum InvalidTransaction { /// it will only be able to assume a bad signature and cannot express a more meaningful error. BadProof, /// The transaction birth block is ancient. + /// + /// # Possible causes + /// + /// For `FRAME`-based runtimes this would be caused by `current block number + /// - Era::birth block number > BlockHashCount`. (e.g. in Polkadot `BlockHashCount` = 2400, so a + /// transaction with birth block number 1337 would be valid up until block number 1337 + 2400, + /// after which point the transaction would be considered to have an ancient birth block.) AncientBirthBlock, /// The transaction would exhaust the resources of current block. 
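Sketch of the new `AccountIdLookup` added above: only `MultiAddress::Id` resolves, every other variant fails with `LookupError`. `AccountId32` and `u32` are stand-ins for the runtime's account and index types:

```rust
use sp_core::crypto::AccountId32;
use sp_runtime::{traits::{AccountIdLookup, StaticLookup}, MultiAddress};

fn main() {
    type Lookup = AccountIdLookup<AccountId32, u32>;
    let id: AccountId32 = [7u8; 32].into();

    // Only the `Id` variant resolves; everything else is a `LookupError`.
    assert_eq!(Lookup::lookup(MultiAddress::Id(id.clone())).unwrap(), id);
    assert!(Lookup::lookup(MultiAddress::Index(3u32)).is_err());

    // `unlookup` goes back to the address form.
    assert_eq!(Lookup::unlookup(id.clone()), MultiAddress::Id(id));
}
```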
/// diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index 70ae56fb48108..9efe5cde7a426 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-sandbox" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,11 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] wasmi = { version = "0.6.2", optional = true } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../io" } -sp-wasm-interface = { version = "2.0.0", default-features = false, path = "../wasm-interface" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-io = { version = "3.0.0", default-features = false, path = "../io" } +sp-wasm-interface = { version = "3.0.0", default-features = false, path = "../wasm-interface" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } [dev-dependencies] wat = "1.0" diff --git a/primitives/sandbox/src/lib.rs b/primitives/sandbox/src/lib.rs index a1348370dfe4b..22e68439958d9 100755 --- a/primitives/sandbox/src/lib.rs +++ b/primitives/sandbox/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/sandbox/with_std.rs b/primitives/sandbox/with_std.rs index 0f46f49503cac..d5f87f165137e 100755 --- a/primitives/sandbox/with_std.rs +++ b/primitives/sandbox/with_std.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/sandbox/without_std.rs b/primitives/sandbox/without_std.rs index dfd3742c6e96f..5897462629c4d 100755 --- a/primitives/sandbox/without_std.rs +++ b/primitives/sandbox/without_std.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index 5a4514db86da9..51b53b43a40b7 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-serializer" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/serializer/src/lib.rs b/primitives/serializer/src/lib.rs index c1e03e58a7af8..3aef9ef5a3873 100644 --- a/primitives/serializer/src/lib.rs +++ b/primitives/serializer/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml index 4fccce6283142..c04b271bc0370 100644 --- a/primitives/session/Cargo.toml +++ b/primitives/session/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-session" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,12 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-staking = { version = "2.0.0", default-features = false, path = "../staking" } -sp-runtime = { version = "2.0.0", optional = true, path = "../runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-api = { version = "3.0.0", default-features = false, path = "../api" } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-staking = { version = "3.0.0", default-features = false, path = "../staking" } +sp-runtime = { version = "3.0.0", optional = true, path = "../runtime" } [features] default = [ "std" ] diff --git a/primitives/session/src/lib.rs b/primitives/session/src/lib.rs index 38a852dafd1dd..8000c23dd4311 100644 --- a/primitives/session/src/lib.rs +++ b/primitives/session/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/sr-api/proc-macro/src/lib.rs b/primitives/sr-api/proc-macro/src/lib.rs index 0c506a1455dbe..4c4aa0d7cb929 100644 --- a/primitives/sr-api/proc-macro/src/lib.rs +++ b/primitives/sr-api/proc-macro/src/lib.rs @@ -1,18 +1,19 @@ -// Copyright 2018-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. //! Macros for declaring and implementing runtime apis. diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index 315d5acc49dae..cf2347082a885 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-staking" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,9 +13,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } [features] default = ["std"] diff --git a/primitives/staking/src/lib.rs b/primitives/staking/src/lib.rs index 3f6c1873ff031..4bb8ed93f88a1 100644 --- a/primitives/staking/src/lib.rs +++ b/primitives/staking/src/lib.rs @@ -1,16 +1,19 @@ - -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #![cfg_attr(not(feature = "std"), no_std)] diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs index 650a17e7898a1..0212d1bd8f2f7 100644 --- a/primitives/staking/src/offence.rs +++ b/primitives/staking/src/offence.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 95751bd4cb1d8..9db850cfe0b96 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-state-machine" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Substrate State Machine" edition = "2018" @@ -16,23 +16,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { version = "0.4.11", optional = true } thiserror = { version = "1.0.21", optional = true } -parking_lot = { version = "0.10.0", optional = true } +parking_lot = { version = "0.11.1", optional = true } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.22.0", default-features = false } +trie-db = { version = "0.22.2", default-features = false } trie-root = { version = "0.16.0", default-features = false } -sp-trie = { version = "2.0.0", path = "../trie", default-features = false } -sp-core = { version = "2.0.0", path = "../core", default-features = false } -sp-panic-handler = { version = "2.0.0", path = "../panic-handler", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +sp-trie = { version = "3.0.0", path = "../trie", default-features = false } +sp-core = { version = "3.0.0", path = "../core", default-features = false } +sp-panic-handler = { version = "3.0.0", path = "../panic-handler", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } num-traits = { version = "0.2.8", default-features = false } rand = { version = "0.7.2", optional = true } -sp-externalities = { version = "0.8.0", path = "../externalities", default-features = false } +sp-externalities = { version = "0.9.0", path = "../externalities", default-features = false } smallvec = "1.4.1" -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } [dev-dependencies] hex-literal = "0.3.1" -sp-runtime = { version = "2.0.0", path = "../runtime" } +sp-runtime = { version = "3.0.0", path = "../runtime" } pretty_assertions = "0.6.1" [features] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 360fe9a985682..eb1c566c6dde1 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -94,7 +94,8 @@ pub trait Backend: sp_std::fmt::Debug { ) -> Result, Self::Error>; /// Retrieve all entries keys of child storage and call `f` for each of those keys. - fn for_keys_in_child_storage( + /// Aborts as soon as `f` returns false. 
+ fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, @@ -263,12 +264,12 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { (*self).child_storage(child_info, key) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - (*self).for_keys_in_child_storage(child_info, f) + (*self).apply_to_child_keys_while(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 5e3c9bed64f10..3b265208136ac 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -210,8 +210,10 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, child_info: &ChildInfo, - ) { + _limit: Option, + ) -> bool { self.inner.children_default.remove(child_info.storage_key()); + true } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -261,8 +263,6 @@ impl Externalities for BasicExternalities { crate::ext::StorageAppend::new(current).append(value); } - fn chain_id(&self) -> u64 { 42 } - fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); let prefixed_keys: Vec<_> = self.inner.children_default.iter().map(|(_k, v)| { @@ -407,7 +407,7 @@ mod tests { ext.clear_child_storage(child_info, b"dog"); assert_eq!(ext.child_storage(child_info, b"dog"), None); - ext.kill_child_storage(child_info); + ext.kill_child_storage(child_info, None); assert_eq!(ext.child_storage(child_info, b"doe"), None); } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index b23481411ae27..1e0fc5c4d6c82 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/state-machine/src/changes_trie/build_cache.rs b/primitives/state-machine/src/changes_trie/build_cache.rs index ef83966795f5e..9b2190ae1951f 100644 --- a/primitives/state-machine/src/changes_trie/build_cache.rs +++ b/primitives/state-machine/src/changes_trie/build_cache.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,7 +34,7 @@ use sp_core::storage::PrefixedStorageKey; /// is inserted (because digest block will includes all keys from this entry). /// When there's a fork, entries are pruned when first changes trie is inserted. pub struct BuildCache { - /// Map of block (implies changes true) number => changes trie root. + /// Map of block (implies changes trie) number => changes trie root. roots_by_number: HashMap, /// Map of changes trie root => set of storage keys that are in this trie. /// The `Option>` in inner `HashMap` stands for the child storage key. 
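The rename from `for_keys_in_child_storage` to `apply_to_child_keys_while` above changes the closure contract from `FnMut(&[u8])` to `FnMut(&[u8]) -> bool`, with iteration stopping at the first `false`. An illustrative model of that contract in plain Rust (not the real `Backend` trait):

```rust
// Model of the "apply while" contract: stop as soon as the closure says so.
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(keys: &[Vec<u8>], mut f: F) {
    for key in keys {
        if !f(key) {
            break;
        }
    }
}

fn main() {
    let keys = vec![b"a".to_vec(), b"b".to_vec(), b"c".to_vec()];
    let mut seen = Vec::new();
    apply_to_keys_while(&keys, |k| {
        seen.push(k.to_vec());
        seen.len() < 2 // ask to stop after the second key
    });
    assert_eq!(seen, vec![b"a".to_vec(), b"b".to_vec()]);
}
```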
diff --git a/primitives/state-machine/src/changes_trie/build_iterator.rs b/primitives/state-machine/src/changes_trie/build_iterator.rs index 3bafd608efa85..43089d819b66d 100644 --- a/primitives/state-machine/src/changes_trie/build_iterator.rs +++ b/primitives/state-machine/src/changes_trie/build_iterator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index f9398b3ce5dd4..be35581e7514d 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/state-machine/src/changes_trie/input.rs b/primitives/state-machine/src/changes_trie/input.rs index 56971f708975f..85a8de0b78d81 100644 --- a/primitives/state-machine/src/changes_trie/input.rs +++ b/primitives/state-machine/src/changes_trie/input.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -123,7 +123,7 @@ impl ExtrinsicIndex { } impl Encode for ExtrinsicIndex { - fn encode_to(&self, dest: &mut W) { + fn encode_to(&self, dest: &mut W) { dest.push_byte(1); self.block.encode_to(dest); self.key.encode_to(dest); @@ -142,7 +142,7 @@ impl DigestIndex { impl Encode for DigestIndex { - fn encode_to(&self, dest: &mut W) { + fn encode_to(&self, dest: &mut W) { dest.push_byte(2); self.block.encode_to(dest); self.key.encode_to(dest); @@ -158,7 +158,7 @@ impl ChildIndex { } impl Encode for ChildIndex { - fn encode_to(&self, dest: &mut W) { + fn encode_to(&self, dest: &mut W) { dest.push_byte(3); self.block.encode_to(dest); self.storage_key.encode_to(dest); diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index fd7b38c052f9e..105f3d7de6d39 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 54456f97add1f..a741b814a5c70 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 51b7ff6f50f71..e08fe36126c7b 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/state-machine/src/changes_trie/surface_iterator.rs b/primitives/state-machine/src/changes_trie/surface_iterator.rs index b9c9d09f0f73b..13da8511f3f96 100644 --- a/primitives/state-machine/src/changes_trie/surface_iterator.rs +++ b/primitives/state-machine/src/changes_trie/surface_iterator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/state-machine/src/error.rs b/primitives/state-machine/src/error.rs index 0b02c68f79f5c..2705e4623a784 100644 --- a/primitives/state-machine/src/error.rs +++ b/primitives/state-machine/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,18 +32,18 @@ impl Error for T {} /// would not be executed unless externalities were available. This is included for completeness, /// and as a transition away from the pre-existing framework. #[derive(Debug, Eq, PartialEq)] +#[allow(missing_docs)] #[cfg_attr(feature = "std", derive(thiserror::Error))] pub enum ExecutionError { - /// Backend error. #[cfg_attr(feature = "std", error("Backend error {0:?}"))] Backend(crate::DefaultError), - /// The entry `:code` doesn't exist in storage so there's no way we can execute anything. + #[cfg_attr(feature = "std", error("`:code` entry does not exist in storage"))] CodeEntryDoesNotExist, - /// Backend is incompatible with execution proof generation process. + #[cfg_attr(feature = "std", error("Unable to generate proof"))] UnableToGenerateProof, - /// Invalid execution proof. + #[cfg_attr(feature = "std", error("Invalid execution proof"))] InvalidProof, } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 53aab42999d5e..1e64cd74bc1b3 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
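The `ExecutionError` hunk above attaches `thiserror` messages to each variant instead of doc comments. A plain sketch of the same pattern with a hypothetical enum; the in-tree type additionally gates the derive behind `cfg_attr(feature = "std", ...)` so it still builds for `no_std`:

```rust
use thiserror::Error;

// Hypothetical enum mirroring the message-per-variant style used above.
#[derive(Debug, Eq, PartialEq, Error)]
pub enum DemoExecutionError {
    #[error("Backend error {0:?}")]
    Backend(String),
    #[error("`:code` entry does not exist in storage")]
    CodeEntryDoesNotExist,
    #[error("Invalid execution proof")]
    InvalidProof,
}

fn main() {
    // `thiserror` derives `Display` from the attributes.
    assert_eq!(
        DemoExecutionError::CodeEntryDoesNotExist.to_string(),
        "`:code` entry does not exist in storage",
    );
}
```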
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -35,8 +35,6 @@ use codec::{Decode, Encode, EncodeAppend}; use sp_std::{fmt, any::{Any, TypeId}, vec::Vec, vec, boxed::Box}; use crate::{warn, trace, log_error}; #[cfg(feature = "std")] -use sp_core::offchain::storage::OffchainOverlayedChanges; -#[cfg(feature = "std")] use crate::changes_trie::State as ChangesTrieState; use crate::StorageTransactionCache; #[cfg(feature = "std")] @@ -100,9 +98,6 @@ pub struct Ext<'a, H, N, B> { /// The overlayed changes to write to. overlay: &'a mut OverlayedChanges, - /// The overlayed changes destined for the Offchain DB. - #[cfg(feature = "std")] - offchain_overlay: &'a mut OffchainOverlayedChanges, /// The storage backend to read from. backend: &'a B, /// The cache for the storage transactions. @@ -146,7 +141,6 @@ impl<'a, H, N, B> Ext<'a, H, N, B> #[cfg(feature = "std")] pub fn new( overlay: &'a mut OverlayedChanges, - offchain_overlay: &'a mut OffchainOverlayedChanges, storage_transaction_cache: &'a mut StorageTransactionCache, backend: &'a B, changes_trie_state: Option>, @@ -154,7 +148,6 @@ impl<'a, H, N, B> Ext<'a, H, N, B> ) -> Self { Self { overlay, - offchain_overlay, backend, changes_trie_state, storage_transaction_cache, @@ -170,12 +163,6 @@ impl<'a, H, N, B> Ext<'a, H, N, B> fn mark_dirty(&mut self) { self.storage_transaction_cache.reset(); } - - /// Read only accessor for the scheduled overlay changes. - #[cfg(feature = "std")] - pub fn get_offchain_storage_changes(&self) -> &OffchainOverlayedChanges { - &*self.offchain_overlay - } } #[cfg(test)] @@ -206,18 +193,10 @@ where B: Backend, N: crate::changes_trie::BlockNumber, { - #[cfg(feature = "std")] fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>) { - use ::sp_core::offchain::STORAGE_PREFIX; - match value { - Some(value) => self.offchain_overlay.set(STORAGE_PREFIX, key, value), - None => self.offchain_overlay.remove(STORAGE_PREFIX, key), - } + self.overlay.set_offchain_storage(key, value) } - #[cfg(not(feature = "std"))] - fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) {} - fn storage(&self, key: &[u8]) -> Option { let _guard = guard(); let result = self.overlay.storage(key).map(|x| x.map(|x| x.to_vec())).unwrap_or_else(|| @@ -411,18 +390,41 @@ where fn kill_child_storage( &mut self, child_info: &ChildInfo, - ) { + limit: Option, + ) -> bool { trace!(target: "state", "{:04x}: KillChild({})", self.id, HexDisplay::from(&child_info.storage_key()), ); let _guard = guard(); - self.mark_dirty(); self.overlay.clear_child_storage(child_info); - self.backend.for_keys_in_child_storage(child_info, |key| { - self.overlay.set_child_storage(child_info, key.to_vec(), None); - }); + + if let Some(limit) = limit { + let mut num_deleted: u32 = 0; + let mut all_deleted = true; + self.backend.apply_to_child_keys_while(child_info, |key| { + if num_deleted == limit { + all_deleted = false; + return false; + } + if let Some(num) = num_deleted.checked_add(1) { + num_deleted = num; + } else { + all_deleted = false; + return false; + } + self.overlay.set_child_storage(child_info, key.to_vec(), None); + true + }); + all_deleted + } else { + self.backend.apply_to_child_keys_while(child_info, |key| { + self.overlay.set_child_storage(child_info, key.to_vec(), None); + true + }); + true + } } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -431,8 +433,9 @@ where HexDisplay::from(&prefix), ); let _guard = guard(); - if is_child_storage_key(prefix) { - warn!(target: "trie", 
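A standalone model of the bounded `kill_child_storage` logic added above: delete at most `limit` child keys and report whether the whole set was removed (`None` means unbounded). This is plain Rust, not the real `Ext`; the real code also guards the counter with `checked_add` and records each deletion in the overlay:

```rust
// (number of keys removed, whether the whole child trie was cleared)
fn kill_up_to(keys: &[&[u8]], limit: Option<u32>) -> (u32, bool) {
    let mut num_deleted: u32 = 0;
    let mut all_deleted = true;
    for _key in keys {
        if let Some(limit) = limit {
            if num_deleted == limit {
                all_deleted = false;
                break;
            }
        }
        // The real code marks `_key` as deleted in the overlay here.
        num_deleted += 1;
    }
    (num_deleted, all_deleted)
}

fn main() {
    let keys: &[&[u8]] = &[b"a", b"b", b"c", b"d"];
    assert_eq!(kill_up_to(keys, Some(0)), (0, false));
    assert_eq!(kill_up_to(keys, Some(2)), (2, false));
    assert_eq!(kill_up_to(keys, Some(4)), (4, true));
    assert_eq!(kill_up_to(keys, None), (4, true));
}
```

Note the boundary behaviour exercised by `limited_child_kill_off_by_one_works` further down: a limit equal to the number of existing keys still counts as fully deleted, only a strictly smaller limit returns `false`.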
"Refuse to directly clear prefix that is part of child storage key"); + + if sp_core::storage::well_known_keys::starts_with_child_storage_key(prefix) { + warn!(target: "trie", "Refuse to directly clear prefix that is part or contains of child storage key"); return; } @@ -484,10 +487,6 @@ where StorageAppend::new(current_value).append(value); } - fn chain_id(&self) -> u64 { - 42 - } - fn storage_root(&mut self) -> Vec { let _guard = guard(); if let Some(ref root) = self.storage_transaction_cache.transaction_storage_root { @@ -783,7 +782,6 @@ mod tests { H256, Blake2Hasher, map, - offchain, storage::{ Storage, StorageChild, @@ -806,16 +804,11 @@ mod tests { changes.set_extrinsic_index(1); changes.set_storage(vec![1], Some(vec![100])); changes.set_storage(EXTRINSIC_INDEX.to_vec(), Some(3u32.encode())); + changes.set_offchain_storage(b"k1", Some(b"v1")); + changes.set_offchain_storage(b"k2", Some(b"v2")); changes } - fn prepare_offchain_overlay_with_changes() -> OffchainOverlayedChanges { - let mut ooc = OffchainOverlayedChanges::enabled(); - ooc.set(offchain::STORAGE_PREFIX, b"k1", b"v1"); - ooc.set(offchain::STORAGE_PREFIX, b"k2", b"v2"); - ooc - } - fn changes_trie_config() -> ChangesTrieConfiguration { ChangesTrieConfiguration { digest_interval: 0, @@ -826,32 +819,29 @@ mod tests { #[test] fn storage_changes_root_is_none_when_storage_is_not_provided() { let mut overlay = prepare_overlay_with_changes(); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); } #[test] fn storage_changes_root_is_none_when_state_is_not_provided() { let mut overlay = prepare_overlay_with_changes(); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); } #[test] fn storage_changes_root_is_some_when_extrinsic_changes_are_non_empty() { let mut overlay = prepare_overlay_with_changes(); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, state, None); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); assert_eq!( ext.storage_changes_root(&H256::default().encode()).unwrap(), Some(hex!("bb0c2ef6e1d36d5490f9766cfcc7dfe2a6ca804504c3bb206053890d6dd02376").to_vec()), @@ -861,14 +851,13 @@ mod tests { #[test] fn storage_changes_root_is_some_when_extrinsic_changes_are_empty() { let mut overlay = prepare_overlay_with_changes(); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); overlay.set_collect_extrinsics(false); overlay.set_storage(vec![1], None); 
let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, state, None); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); assert_eq!( ext.storage_changes_root(&H256::default().encode()).unwrap(), Some(hex!("96f5aae4690e7302737b6f9b7f8567d5bbb9eac1c315f80101235a92d9ec27f4").to_vec()), @@ -881,7 +870,6 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_storage(vec![20], None); overlay.set_storage(vec![30], Some(vec![31])); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let backend = Storage { top: map![ vec![10] => vec![10], @@ -891,7 +879,7 @@ mod tests { children_default: map![] }.into(); - let ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay assert_eq!(ext.next_storage_key(&[5]), Some(vec![10])); @@ -907,7 +895,7 @@ mod tests { drop(ext); overlay.set_storage(vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_storage_key(&[40]), Some(vec![50])); @@ -936,10 +924,7 @@ mod tests { ], }.into(); - - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); - - let ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); @@ -955,7 +940,7 @@ mod tests { drop(ext); overlay.set_child_storage(child_info, vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); @@ -969,7 +954,6 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let backend = Storage { top: map![], children_default: map![ @@ -984,7 +968,7 @@ mod tests { ], }.into(); - let ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( @@ -1005,6 +989,44 @@ mod tests { ); } + #[test] + fn clear_prefix_cannot_delete_a_child_root() { + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; + let mut cache = StorageTransactionCache::default(); + let mut overlay = OverlayedChanges::default(); + let backend = Storage { + top: map![], + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ + vec![30] => vec![40] + ], + child_info: child_info.to_owned(), + } + ], + }.into(); + + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + + 
use sp_core::storage::well_known_keys; + let mut ext = ext; + let mut not_under_prefix = well_known_keys::CHILD_STORAGE_KEY_PREFIX.to_vec(); + not_under_prefix[4] = 88; + not_under_prefix.extend(b"path"); + ext.set_storage(not_under_prefix.clone(), vec![10]); + + ext.clear_prefix(&[]); + ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4]); + let mut under_prefix = well_known_keys::CHILD_STORAGE_KEY_PREFIX.to_vec(); + under_prefix.extend(b"path"); + ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4]); + assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![40])); + assert_eq!(ext.storage(not_under_prefix.as_slice()), Some(vec![10])); + ext.clear_prefix(¬_under_prefix[..5]); + assert_eq!(ext.storage(not_under_prefix.as_slice()), None); + } + #[test] fn storage_append_works() { let mut data = Vec::new(); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index f211f60202730..4ee16dfd2f8a8 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -1,72 +1,38 @@ -// Copyright 2017-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! State machine in memory backend. use crate::{ - StorageKey, StorageValue, StorageCollection, - trie_backend::TrieBackend, + StorageKey, StorageValue, StorageCollection, trie_backend::TrieBackend, backend::Backend, }; -use std::{collections::{BTreeMap, HashMap}}; +use std::collections::{BTreeMap, HashMap}; use hash_db::Hasher; -use sp_trie::{ - MemoryDB, TrieMut, - trie_types::TrieDBMut, -}; +use sp_trie::{MemoryDB, empty_trie_root, Layout}; use codec::Codec; use sp_core::storage::{ChildInfo, Storage}; -/// Insert input pairs into memory db. 
-fn insert_into_memory_db(mut root: H::Out, mdb: &mut MemoryDB, input: I) -> H::Out -where - H: Hasher, - I: IntoIterator)>, -{ - { - let mut trie = if root == Default::default() { - TrieDBMut::::new(mdb, &mut root) - } else { - TrieDBMut::::from_existing(mdb, &mut root).unwrap() - }; - for (key, value) in input { - if let Err(e) = match value { - Some(value) => { - trie.insert(&key, &value) - }, - None => { - trie.remove(&key) - }, - } { - panic!("Failed to write to trie: {}", e); - } - } - trie.commit(); - } - root -} - /// Create a new empty instance of in-memory backend. pub fn new_in_mem() -> TrieBackend, H> where H::Out: Codec + Ord, { let db = MemoryDB::default(); - let mut backend = TrieBackend::new(db, Default::default()); - backend.insert(std::iter::empty()); - backend + TrieBackend::new(db, empty_trie_root::>()) } impl TrieBackend, H> @@ -92,32 +58,16 @@ where &mut self, changes: T, ) { - let mut new_child_roots = Vec::new(); - let mut root_map = None; - let root = self.root().clone(); - for (child_info, map) in changes { - if let Some(child_info) = child_info.as_ref() { - let prefix_storage_key = child_info.prefixed_storage_key(); - let ch = insert_into_memory_db::(root, self.backend_storage_mut(), map.clone().into_iter()); - new_child_roots.push((prefix_storage_key.into_inner(), Some(ch.as_ref().into()))); - } else { - root_map = Some(map); - } - } + let (top, child) = changes.into_iter().partition::, _>(|v| v.0.is_none()); + let (root, transaction) = self.full_storage_root( + top.iter().map(|(_, v)| v).flatten().map(|(k, v)| (&k[..], v.as_deref())), + child.iter() + .filter_map(|v| + v.0.as_ref().map(|c| (c, v.1.iter().map(|(k, v)| (&k[..], v.as_deref())))) + ), + ); - let root = match root_map { - Some(map) => insert_into_memory_db::( - root, - self.backend_storage_mut(), - map.into_iter().chain(new_child_roots.into_iter()), - ), - None => insert_into_memory_db::( - root, - self.backend_storage_mut(), - new_child_roots.into_iter(), - ), - }; - self.essence.set_root(root); + self.apply_transaction(root, transaction); } /// Merge trie nodes into this backend. @@ -127,6 +77,12 @@ where Self::new(clone, root) } + /// Apply the given transaction to this backend and set the root to the given value. + pub fn apply_transaction(&mut self, root: H::Out, transaction: MemoryDB) { + self.backend_storage_mut().consolidate(transaction); + self.essence.set_root(root); + } + /// Compare with another in-memory backend. 
pub fn eq(&self, other: &Self) -> bool { self.root() == other.root() @@ -158,7 +114,9 @@ where { fn from(inner: HashMap, BTreeMap>) -> Self { let mut backend = new_in_mem(); - backend.insert(inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect()))); + backend.insert( + inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), + ); backend } } @@ -232,4 +190,16 @@ mod tests { let storage_key = child_info.prefixed_storage_key(); assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some()); } + + #[test] + fn insert_multiple_times_child_data_works() { + let mut storage = new_in_mem::(); + let child_info = ChildInfo::new_default(b"1"); + + storage.insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); + storage.insert(vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])]); + + assert_eq!(storage.child_storage(&child_info, &b"2"[..]), Ok(Some(b"3".to_vec()))); + assert_eq!(storage.child_storage(&child_info, &b"1"[..]), Ok(Some(b"3".to_vec()))); + } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 28148b6411a13..31d4eacc4e586 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -119,6 +119,8 @@ pub use crate::overlayed_changes::{ OverlayedChanges, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, StorageChanges, StorageTransactionCache, + OffchainChangesCollection, + OffchainOverlayedChanges, }; pub use crate::backend::Backend; pub use crate::trie_backend_essence::{TrieBackendStorage, Storage}; @@ -172,7 +174,6 @@ mod execution { use hash_db::Hasher; use codec::{Decode, Encode, Codec}; use sp_core::{ - offchain::storage::OffchainOverlayedChanges, storage::ChildInfo, NativeOrEncoded, NeverNativeValue, hexdisplay::HexDisplay, traits::{CodeExecutor, CallInWasmExt, RuntimeCode, SpawnNamed}, }; @@ -299,7 +300,6 @@ mod execution { method: &'a str, call_data: &'a [u8], overlay: &'a mut OverlayedChanges, - offchain_overlay: &'a mut OffchainOverlayedChanges, extensions: Extensions, changes_trie_state: Option>, storage_transaction_cache: Option<&'a mut StorageTransactionCache>, @@ -329,7 +329,6 @@ mod execution { backend: &'a B, changes_trie_state: Option>, overlay: &'a mut OverlayedChanges, - offchain_overlay: &'a mut OffchainOverlayedChanges, exec: &'a Exec, method: &'a str, call_data: &'a [u8], @@ -347,7 +346,6 @@ mod execution { call_data, extensions, overlay, - offchain_overlay, changes_trie_state, storage_transaction_cache: None, runtime_code, @@ -407,7 +405,6 @@ mod execution { let mut ext = Ext::new( self.overlay, - self.offchain_overlay, cache, self.backend, self.changes_trie_state.clone(), @@ -621,13 +618,11 @@ mod execution { N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { - let mut offchain_overlay = OffchainOverlayedChanges::default(); let proving_backend = proving_backend::ProvingBackend::new(trie_backend); let mut sm = StateMachine::<_, H, N, Exec>::new( &proving_backend, None, overlay, - &mut offchain_overlay, exec, method, call_data, @@ -691,12 +686,10 @@ mod execution { N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { - let mut 
offchain_overlay = OffchainOverlayedChanges::default(); let mut sm = StateMachine::<_, H, N, Exec>::new( trie_backend, None, overlay, - &mut offchain_overlay, exec, method, call_data, @@ -879,7 +872,6 @@ mod tests { use std::{result, collections::HashMap}; use codec::Decode; use sp_core::{ - offchain::storage::OffchainOverlayedChanges, storage::ChildInfo, NativeOrEncoded, NeverNativeValue, traits::CodeExecutor, }; @@ -966,14 +958,12 @@ mod tests { fn execute_works() { let backend = trie_backend::tests::test_trie(); let mut overlayed_changes = Default::default(); - let mut offchain_overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); let mut state_machine = StateMachine::new( &backend, changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, - &mut offchain_overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -998,14 +988,12 @@ mod tests { fn execute_works_with_native_else_wasm() { let backend = trie_backend::tests::test_trie(); let mut overlayed_changes = Default::default(); - let mut offchain_overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); let mut state_machine = StateMachine::new( &backend, changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, - &mut offchain_overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -1027,14 +1015,12 @@ mod tests { let mut consensus_failed = false; let backend = trie_backend::tests::test_trie(); let mut overlayed_changes = Default::default(); - let mut offchain_overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); let mut state_machine = StateMachine::new( &backend, changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, - &mut offchain_overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -1118,11 +1104,9 @@ mod tests { overlay.set_storage(b"bbd".to_vec(), Some(b"42".to_vec())); { - let mut offchain_overlay = Default::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1147,6 +1131,82 @@ mod tests { ); } + #[test] + fn limited_child_kill_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let initial: HashMap<_, BTreeMap<_, _>> = map![ + Some(child_info.clone()) => map![ + b"a".to_vec() => b"0".to_vec(), + b"b".to_vec() => b"1".to_vec(), + b"c".to_vec() => b"2".to_vec(), + b"d".to_vec() => b"3".to_vec() + ], + ]; + let backend = InMemoryBackend::::from(initial); + + let mut overlay = OverlayedChanges::default(); + overlay.set_child_storage(&child_info, b"1".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"2".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"3".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"4".to_vec(), Some(b"1312".to_vec())); + + { + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut cache, + &backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + assert_eq!(ext.kill_child_storage(&child_info, Some(2)), false); + } + + assert_eq!( + overlay.children() + .flat_map(|(iter, _child_info)| iter) + .map(|(k, v)| (k.clone(), v.value().clone())) + .collect::>(), + map![ + b"1".to_vec() => None.into(), + b"2".to_vec() => None.into(), + b"3".to_vec() => None.into(), + b"4".to_vec() => None.into(), + 
b"a".to_vec() => None.into(), + b"b".to_vec() => None.into(), + ], + ); + } + + #[test] + fn limited_child_kill_off_by_one_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let initial: HashMap<_, BTreeMap<_, _>> = map![ + Some(child_info.clone()) => map![ + b"a".to_vec() => b"0".to_vec(), + b"b".to_vec() => b"1".to_vec(), + b"c".to_vec() => b"2".to_vec(), + b"d".to_vec() => b"3".to_vec() + ], + ]; + let backend = InMemoryBackend::::from(initial); + let mut overlay = OverlayedChanges::default(); + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut cache, + &backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + assert_eq!(ext.kill_child_storage(&child_info, Some(0)), false); + assert_eq!(ext.kill_child_storage(&child_info, Some(1)), false); + assert_eq!(ext.kill_child_storage(&child_info, Some(2)), false); + assert_eq!(ext.kill_child_storage(&child_info, Some(3)), false); + assert_eq!(ext.kill_child_storage(&child_info, Some(4)), true); + assert_eq!(ext.kill_child_storage(&child_info, Some(5)), true); + } + #[test] fn set_child_storage_works() { let child_info = ChildInfo::new_default(b"sub1"); @@ -1154,11 +1214,9 @@ mod tests { let mut state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1179,6 +1237,7 @@ mod tests { ); ext.kill_child_storage( child_info, + None, ); assert_eq!( ext.child_storage( @@ -1201,12 +1260,10 @@ mod tests { let mut state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); { let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1223,7 +1280,6 @@ mod tests { { let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1242,7 +1298,6 @@ mod tests { { let ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1265,14 +1320,12 @@ mod tests { let mut cache = StorageTransactionCache::default(); let mut state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut overlay = OverlayedChanges::default(); // For example, block initialization with event. 
{ let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1287,7 +1340,6 @@ mod tests { { let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1312,7 +1364,6 @@ mod tests { { let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1338,7 +1389,6 @@ mod tests { { let ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1414,14 +1464,12 @@ mod tests { use crate::trie_backend::tests::test_trie; let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut transaction = { let backend = test_trie(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, &backend, changes_trie::disabled_state::<_, u64>(), @@ -1462,11 +1510,9 @@ mod tests { assert_eq!(overlay.storage(b"bbb"), None); { - let mut offchain_overlay = Default::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1490,14 +1536,12 @@ mod tests { let backend = trie_backend::tests::test_trie(); let mut overlayed_changes = Default::default(); - let mut offchain_overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); let mut state_machine = StateMachine::new( &backend, changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, - &mut offchain_overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, native_available: true, diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs index 5e4fd77c68563..d25f4807aa979 100644 --- a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,6 +25,7 @@ use std::collections::HashSet as Set; use sp_std::collections::btree_set::BTreeSet as Set; use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; +use sp_std::hash::Hash; use smallvec::SmallVec; use crate::warn; @@ -32,8 +33,8 @@ const PROOF_OVERLAY_NON_EMPTY: &str = "\ An OverlayValue is always created with at least one transaction and dropped as soon as the last transaction is removed; qed"; -type DirtyKeysSets = SmallVec<[Set; 5]>; -type Transactions = SmallVec<[InnerValue; 5]>; +type DirtyKeysSets = SmallVec<[Set; 5]>; +type Transactions = SmallVec<[InnerValue; 5]>; /// Error returned when trying to commit or rollback while no transaction is open or /// when the runtime is trying to close a transaction started by the client. @@ -62,32 +63,46 @@ pub enum ExecutionMode { #[derive(Debug, Default, Clone)] #[cfg_attr(test, derive(PartialEq))] -struct InnerValue { +struct InnerValue { /// Current value. None if value has been deleted. - value: Option, + value: V, /// The set of extrinsic indices where the values has been changed. /// Is filled only if runtime has announced changes trie support. 
extrinsics: Extrinsics, } /// An overlay that contains all versions of a value for a specific key. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Clone)] #[cfg_attr(test, derive(PartialEq))] -pub struct OverlayedValue { +pub struct OverlayedEntry { /// The individual versions of that value. /// One entry per transactions during that the value was actually written. - transactions: Transactions, + transactions: Transactions, +} + +impl Default for OverlayedEntry { + fn default() -> Self { + Self { + transactions: SmallVec::new(), + } + } } +/// History of value, with removal support. +pub type OverlayedValue = OverlayedEntry>; + +/// Change set for basic key value with extrinsics index recording and removal support. +pub type OverlayedChangeSet = OverlayedMap>; + /// Holds a set of changes with the ability modify them using nested transactions. -#[derive(Debug, Default, Clone)] -pub struct OverlayedChangeSet { +#[derive(Debug, Clone)] +pub struct OverlayedMap { /// Stores the changes that this overlay constitutes. - changes: BTreeMap, + changes: BTreeMap>, /// Stores which keys are dirty per transaction. Needed in order to determine which /// values to merge into the parent transaction on commit. The length of this vector /// therefore determines how many nested transactions are currently open (depth). - dirty_keys: DirtyKeysSets, + dirty_keys: DirtyKeysSets, /// The number of how many transactions beginning from the first transactions are started /// by the client. Those transactions are protected against close (commit, rollback) /// when in runtime mode. @@ -96,16 +111,32 @@ pub struct OverlayedChangeSet { execution_mode: ExecutionMode, } +impl Default for OverlayedMap { + fn default() -> Self { + Self { + changes: BTreeMap::new(), + dirty_keys: SmallVec::new(), + num_client_transactions: Default::default(), + execution_mode: Default::default(), + } + } +} + impl Default for ExecutionMode { fn default() -> Self { Self::Client } } -impl OverlayedValue { +impl OverlayedEntry { /// The value as seen by the current transaction. - pub fn value(&self) -> Option<&StorageValue> { - self.transactions.last().expect(PROOF_OVERLAY_NON_EMPTY).value.as_ref() + pub fn value_ref(&self) -> &V { + &self.transactions.last().expect(PROOF_OVERLAY_NON_EMPTY).value + } + + /// The value as seen by the current transaction. + pub fn into_value(mut self) -> V { + self.transactions.pop().expect(PROOF_OVERLAY_NON_EMPTY).value } /// Unique list of extrinsic indices which modified the value. @@ -116,12 +147,12 @@ impl OverlayedValue { } /// Mutable reference to the most recent version. - fn value_mut(&mut self) -> &mut Option { + fn value_mut(&mut self) -> &mut V { &mut self.transactions.last_mut().expect(PROOF_OVERLAY_NON_EMPTY).value } /// Remove the last version and return it. - fn pop_transaction(&mut self) -> InnerValue { + fn pop_transaction(&mut self) -> InnerValue { self.transactions.pop().expect(PROOF_OVERLAY_NON_EMPTY) } @@ -136,14 +167,14 @@ impl OverlayedValue { /// rolled back when required. fn set( &mut self, - value: Option, + value: V, first_write_in_tx: bool, at_extrinsic: Option, ) { if first_write_in_tx || self.transactions.is_empty() { self.transactions.push(InnerValue { value, - .. Default::default() + extrinsics: Default::default(), }); } else { *self.value_mut() = value; @@ -155,15 +186,22 @@ impl OverlayedValue { } } +impl OverlayedEntry> { + /// The value as seen by the current transaction. 
+ pub fn value(&self) -> Option<&StorageValue> { + self.value_ref().as_ref() + } +} + /// Inserts a key into the dirty set. /// /// Returns true iff we are currently have at least one open transaction and if this /// is the first write to the given key that transaction. -fn insert_dirty(set: &mut DirtyKeysSets, key: StorageKey) -> bool { +fn insert_dirty(set: &mut DirtyKeysSets, key: K) -> bool { set.last_mut().map(|dk| dk.insert(key)).unwrap_or_default() } -impl OverlayedChangeSet { +impl OverlayedMap { /// Create a new changeset at the same transaction state but without any contents. /// /// This changeset might be created when there are already open transactions. @@ -171,10 +209,10 @@ impl OverlayedChangeSet { pub fn spawn_child(&self) -> Self { use sp_std::iter::repeat; Self { + changes: Default::default(), dirty_keys: repeat(Set::new()).take(self.transaction_depth()).collect(), num_client_transactions: self.num_client_transactions, execution_mode: self.execution_mode, - .. Default::default() } } @@ -184,7 +222,11 @@ impl OverlayedChangeSet { } /// Get an optional reference to the value stored for the specified key. - pub fn get(&self, key: &[u8]) -> Option<&OverlayedValue> { + pub fn get(&self, key: &Q) -> Option<&OverlayedEntry> + where + K: sp_std::borrow::Borrow, + Q: Ord + ?Sized, + { self.changes.get(key) } @@ -193,72 +235,30 @@ impl OverlayedChangeSet { /// Can be rolled back or committed when called inside a transaction. pub fn set( &mut self, - key: StorageKey, - value: Option, + key: K, + value: V, at_extrinsic: Option, ) { let overlayed = self.changes.entry(key.clone()).or_default(); overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); } - /// Get a mutable reference for a value. - /// - /// Can be rolled back or committed when called inside a transaction. - #[must_use = "A change was registered, so this value MUST be modified."] - pub fn modify( - &mut self, - key: StorageKey, - init: impl Fn() -> StorageValue, - at_extrinsic: Option, - ) -> &mut Option { - let overlayed = self.changes.entry(key.clone()).or_default(); - let first_write_in_tx = insert_dirty(&mut self.dirty_keys, key); - let clone_into_new_tx = if let Some(tx) = overlayed.transactions.last() { - if first_write_in_tx { - Some(tx.value.clone()) - } else { - None - } - } else { - Some(Some(init())) - }; - - if let Some(cloned) = clone_into_new_tx { - overlayed.set(cloned, first_write_in_tx, at_extrinsic); - } - overlayed.value_mut() - } - - /// Set all values to deleted which are matched by the predicate. - /// - /// Can be rolled back or committed when called inside a transaction. - pub fn clear_where( - &mut self, - predicate: impl Fn(&[u8], &OverlayedValue) -> bool, - at_extrinsic: Option, - ) { - for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { - val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); - } - } - /// Get a list of all changes as seen by current transaction. - pub fn changes(&self) -> impl Iterator { + pub fn changes(&self) -> impl Iterator)> { self.changes.iter() } - /// Get the change that is next to the supplied key. - pub fn next_change(&self, key: &[u8]) -> Option<(&[u8], &OverlayedValue)> { - use sp_std::ops::Bound; - let range = (Bound::Excluded(key), Bound::Unbounded); - self.changes.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v)) + /// Get a list of all changes as seen by current transaction, consumes + /// the overlay. 
+ pub fn into_changes(self) -> impl Iterator)> { + self.changes.into_iter() } /// Consume this changeset and return all committed changes. /// /// Panics: /// Panics if there are open transactions: `transaction_depth() > 0` - pub fn drain_commited(self) -> impl Iterator)> { + pub fn drain_commited(self) -> impl Iterator { assert!(self.transaction_depth() == 0, "Drain is not allowed with open transactions."); self.changes.into_iter().map(|(k, mut v)| (k, v.pop_transaction().value)) } @@ -384,6 +384,56 @@ impl OverlayedChangeSet { } } +impl OverlayedChangeSet { + /// Get a mutable reference for a value. + /// + /// Can be rolled back or committed when called inside a transaction. + #[must_use = "A change was registered, so this value MUST be modified."] + pub fn modify( + &mut self, + key: StorageKey, + init: impl Fn() -> StorageValue, + at_extrinsic: Option, + ) -> &mut Option { + let overlayed = self.changes.entry(key.clone()).or_default(); + let first_write_in_tx = insert_dirty(&mut self.dirty_keys, key); + let clone_into_new_tx = if let Some(tx) = overlayed.transactions.last() { + if first_write_in_tx { + Some(tx.value.clone()) + } else { + None + } + } else { + Some(Some(init())) + }; + + if let Some(cloned) = clone_into_new_tx { + overlayed.set(cloned, first_write_in_tx, at_extrinsic); + } + overlayed.value_mut() + } + + /// Set all values to deleted which are matched by the predicate. + /// + /// Can be rolled back or committed when called inside a transaction. + pub fn clear_where( + &mut self, + predicate: impl Fn(&[u8], &OverlayedValue) -> bool, + at_extrinsic: Option, + ) { + for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { + val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); + } + } + + /// Get the change that is next to the supplied key. + pub fn next_change(&self, key: &[u8]) -> Option<(&[u8], &OverlayedValue)> { + use sp_std::ops::Bound; + let range = (Bound::Excluded(key), Bound::Unbounded); + self.changes.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v)) + } +} + #[cfg(test)] mod test { use super::*; diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 6ef09fc81505d..285bf2a73a148 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,9 @@ //! The overlayed changes to state. mod changeset; +mod offchain; +pub use offchain::OffchainOverlayedChanges; use crate::{ backend::Backend, stats::StateMachineStats, @@ -42,8 +44,7 @@ use sp_std::collections::btree_map::{BTreeMap as Map, Entry as MapEntry}; use sp_std::collections::btree_set::BTreeSet; use codec::{Decode, Encode}; use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; -#[cfg(feature = "std")] -use sp_core::offchain::storage::OffchainOverlayedChanges; +use sp_core::offchain::OffchainOverlayedChange; use hash_db::Hasher; use crate::DefaultError; use sp_externalities::{Extensions, Extension}; @@ -65,6 +66,9 @@ pub type StorageCollection = Vec<(StorageKey, Option)>; /// In memory arrays of storage values for multiple child tries. 
pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection)>; +/// In memory array of storage values. +pub type OffchainChangesCollection = Vec<((Vec, Vec), OffchainOverlayedChange)>; + /// Keep trace of extrinsics index for a modified value. #[derive(Debug, Default, Eq, PartialEq, Clone)] pub struct Extrinsics(Vec); @@ -97,6 +101,8 @@ pub struct OverlayedChanges { top: OverlayedChangeSet, /// Child storage changes. The map key is the child storage key without the common prefix. children: Map, + /// Offchain related changes. + offchain: OffchainOverlayedChanges, /// True if extrinsics stats must be collected. collect_extrinsics: bool, /// Collect statistic on this execution. @@ -115,8 +121,7 @@ pub struct StorageChanges { /// All changes to the child storages. pub child_storage_changes: ChildStorageCollection, /// Offchain state changes to write to the offchain database. - #[cfg(feature = "std")] - pub offchain_storage_changes: OffchainOverlayedChanges, + pub offchain_storage_changes: OffchainChangesCollection, /// A transaction for the backend that contains all changes from /// [`main_storage_changes`](StorageChanges::main_storage_changes) and from /// [`child_storage_changes`](StorageChanges::child_storage_changes). @@ -140,7 +145,7 @@ impl StorageChanges { pub fn into_inner(self) -> ( StorageCollection, ChildStorageCollection, - OffchainOverlayedChanges, + OffchainChangesCollection, Transaction, H::Out, Option>, @@ -202,7 +207,6 @@ impl Default for StorageChanges Self { main_storage_changes: Default::default(), child_storage_changes: Default::default(), - #[cfg(feature = "std")] offchain_storage_changes: Default::default(), transaction: Default::default(), transaction_storage_root: Default::default(), @@ -366,12 +370,13 @@ impl OverlayedChanges { /// transaction was open. Any transaction must be closed by either `rollback_transaction` or /// `commit_transaction` before this overlay can be converted into storage changes. /// - /// Changes made without any open transaction are committed immediatly. + /// Changes made without any open transaction are committed immediately. pub fn start_transaction(&mut self) { self.top.start_transaction(); for (_, (changeset, _)) in self.children.iter_mut() { changeset.start_transaction(); } + self.offchain.overlay_mut().start_transaction(); } /// Rollback the last transaction started by `start_transaction`. 
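The change set below is generalised from storage-only `OverlayedValue`s into `OverlayedMap<K, V>`/`OverlayedEntry<V>`, keeping one version of each value per open transaction so the offchain overlay can reuse the same machinery. A minimal standalone analogue of that versioning idea, illustrative only; the real type also records extrinsic indices and relies on the per-transaction dirty-key sets of `OverlayedMap`.

// One element per open transaction that wrote to this entry, newest last.
struct Versioned<V> {
    versions: Vec<V>,
}

impl<V> Versioned<V> {
    fn new() -> Self {
        Self { versions: Vec::new() }
    }

    // The first write inside a new transaction pushes a fresh version;
    // later writes in the same transaction overwrite it in place.
    fn set(&mut self, value: V, first_write_in_tx: bool) {
        if first_write_in_tx || self.versions.is_empty() {
            self.versions.push(value);
        } else {
            *self.versions.last_mut().expect("just checked non-empty") = value;
        }
    }

    // The value as seen by the current transaction.
    fn current(&self) -> Option<&V> {
        self.versions.last()
    }

    // Rolling a transaction back simply drops its version.
    fn rollback(&mut self) {
        self.versions.pop();
    }
}

fn versioned_rollback_example() {
    let mut entry = Versioned::new();
    entry.set(1u32, true); // committed baseline
    entry.set(2u32, true); // written inside a nested transaction
    entry.rollback();      // nested transaction aborted
    assert_eq!(entry.current(), Some(&1));
}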
@@ -385,6 +390,8 @@ impl OverlayedChanges { .expect("Top and children changesets are started in lockstep; qed"); !changeset.is_empty() }); + self.offchain.overlay_mut().rollback_transaction() + .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -398,6 +405,8 @@ impl OverlayedChanges { changeset.commit_transaction() .expect("Top and children changesets are started in lockstep; qed"); } + self.offchain.overlay_mut().commit_transaction() + .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -411,6 +420,8 @@ impl OverlayedChanges { changeset.enter_runtime() .expect("Top and children changesets are entering runtime in lockstep; qed") } + self.offchain.overlay_mut().enter_runtime() + .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -424,6 +435,8 @@ impl OverlayedChanges { changeset.exit_runtime() .expect("Top and children changesets are entering runtime in lockstep; qed"); } + self.offchain.overlay_mut().exit_runtime() + .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -449,6 +462,16 @@ impl OverlayedChanges { ) } + /// Consume all committed offchain changes and return them. + /// + /// After calling this function no more offchain changes are contained in this changeset. + /// + /// Panics: + /// Panics if `transaction_depth() > 0` + pub fn offchain_drain_committed(&mut self) -> impl Iterator { + self.offchain.drain() + } + /// Get an iterator over all child changes as seen by the current transaction. pub fn children(&self) -> impl Iterator, &ChildInfo)> { @@ -518,12 +541,12 @@ impl OverlayedChanges { .expect("Changes trie transaction was generated by `changes_trie_root`; qed"); let (main_storage_changes, child_storage_changes) = self.drain_committed(); + let offchain_storage_changes = self.offchain_drain_committed().collect(); Ok(StorageChanges { main_storage_changes: main_storage_changes.collect(), child_storage_changes: child_storage_changes.map(|(sk, it)| (sk, it.0.collect())).collect(), - #[cfg(feature = "std")] - offchain_storage_changes: Default::default(), + offchain_storage_changes, transaction, transaction_storage_root, #[cfg(feature = "std")] @@ -629,6 +652,20 @@ impl OverlayedChanges { overlay.next_change(key) ) } + + /// Read-only access to the offchain overlay. + pub fn offchain(&self) -> &OffchainOverlayedChanges { + &self.offchain + } + + /// Write a key value pair to the offchain storage overlay.
+ pub fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>) { + use sp_core::offchain::STORAGE_PREFIX; + match value { + Some(value) => self.offchain.set(STORAGE_PREFIX, key, value), + None => self.offchain.remove(STORAGE_PREFIX, key), + } + } } #[cfg(feature = "std")] @@ -767,6 +804,61 @@ mod tests { assert!(overlayed.storage(&key).unwrap().is_none()); } + #[test] + fn offchain_overlayed_storage_transactions_works() { + use sp_core::offchain::STORAGE_PREFIX; + fn check_offchain_content( + state: &OverlayedChanges, + nb_commit: usize, + expected: Vec<(Vec, Option>)>, + ) { + let mut state = state.clone(); + for _ in 0..nb_commit { + state.commit_transaction().unwrap(); + } + let offchain_data: Vec<_> = state.offchain_drain_committed().collect(); + let expected: Vec<_> = expected.into_iter().map(|(key, value)| { + let change = match value { + Some(value) => OffchainOverlayedChange::SetValue(value), + None => OffchainOverlayedChange::Remove, + }; + ((STORAGE_PREFIX.to_vec(), key), change) + }).collect(); + assert_eq!(offchain_data, expected); + } + + let mut overlayed = OverlayedChanges::default(); + + let key = vec![42, 69, 169, 142]; + + check_offchain_content(&overlayed, 0, vec![]); + + overlayed.start_transaction(); + + overlayed.set_offchain_storage(key.as_slice(), Some(&[1, 2, 3][..])); + check_offchain_content(&overlayed, 1, vec![(key.clone(), Some(vec![1, 2, 3]))]); + + overlayed.commit_transaction().unwrap(); + + check_offchain_content(&overlayed, 0, vec![(key.clone(), Some(vec![1, 2, 3]))]); + + overlayed.start_transaction(); + + overlayed.set_offchain_storage(key.as_slice(), Some(&[][..])); + check_offchain_content(&overlayed, 1, vec![(key.clone(), Some(vec![]))]); + + overlayed.set_offchain_storage(key.as_slice(), None); + check_offchain_content(&overlayed, 1, vec![(key.clone(), None)]); + + overlayed.rollback_transaction().unwrap(); + + check_offchain_content(&overlayed, 0, vec![(key.clone(), Some(vec![1, 2, 3]))]); + + overlayed.set_offchain_storage(key.as_slice(), None); + check_offchain_content(&overlayed, 0, vec![(key.clone(), None)]); + } + + #[test] fn overlayed_storage_root_works() { let initial: BTreeMap<_, _> = vec![ @@ -789,11 +881,9 @@ mod tests { overlay.set_storage(b"dogglesworth".to_vec(), Some(b"cat".to_vec())); overlay.set_storage(b"doug".to_vec(), None); - let mut offchain_overlay = Default::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, &backend, crate::changes_trie::disabled_state::<_, u64>(), diff --git a/primitives/state-machine/src/overlayed_changes/offchain.rs b/primitives/state-machine/src/overlayed_changes/offchain.rs new file mode 100644 index 0000000000000..4128be24bc546 --- /dev/null +++ b/primitives/state-machine/src/overlayed_changes/offchain.rs @@ -0,0 +1,130 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Overlayed changes for offchain indexing. + +use sp_core::offchain::OffchainOverlayedChange; +use sp_std::prelude::Vec; +use super::changeset::OverlayedMap; + +/// In-memory storage for offchain workers recording changes for the actual offchain storage implementation. +#[derive(Debug, Clone, Default)] +pub struct OffchainOverlayedChanges(OverlayedMap<(Vec, Vec), OffchainOverlayedChange>); + +/// Item for iterating over offchain changes. +/// +/// The first element is a tuple of `(prefix, key)`, the second element is the actual change +/// (remove or set value). +type OffchainOverlayedChangesItem<'i> = (&'i (Vec, Vec), &'i OffchainOverlayedChange); + +/// Iterator over offchain changes, owned memory version. +type OffchainOverlayedChangesItemOwned = ((Vec, Vec), OffchainOverlayedChange); + +impl OffchainOverlayedChanges { + /// Consume the offchain storage and iterate over all key value pairs. + pub fn into_iter(self) -> impl Iterator { + self.0.into_changes().map(|kv| (kv.0, kv.1.into_value())) + } + + /// Iterate over all key value pairs by reference. + pub fn iter<'a>(&'a self) -> impl Iterator> { + self.0.changes().map(|kv| (kv.0, kv.1.value_ref())) + } + + /// Drain all elements of the changeset. + pub fn drain(&mut self) -> impl Iterator { + sp_std::mem::take(self).into_iter() + } + + /// Remove a key and its associated value from the offchain database. + pub fn remove(&mut self, prefix: &[u8], key: &[u8]) { + let _ = self.0.set( + (prefix.to_vec(), key.to_vec()), + OffchainOverlayedChange::Remove, + None, + ); + } + + /// Set the value associated with a key under a prefix to the value provided. + pub fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { + let _ = self.0.set( + (prefix.to_vec(), key.to_vec()), + OffchainOverlayedChange::SetValue(value.to_vec()), + None, + ); + } + + /// Obtain the associated value for the given key in storage with the given prefix. + pub fn get(&self, prefix: &[u8], key: &[u8]) -> Option { + let key = (prefix.to_vec(), key.to_vec()); + self.0.get(&key).map(|entry| entry.value_ref()).cloned() + } + + /// Reference to inner change set. + pub fn overlay(&self) -> &OverlayedMap<(Vec, Vec), OffchainOverlayedChange> { + &self.0 + } + + /// Mutable reference to inner change set.
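`OffchainOverlayedChanges` is now a thin wrapper around `OverlayedMap<(prefix, key), OffchainOverlayedChange>`. A short usage sketch of the API added above, mirroring what the module tests that follow exercise; it assumes the re-export of `OffchainOverlayedChanges` from sp-state-machine added in this diff.

use sp_core::offchain::{OffchainOverlayedChange, STORAGE_PREFIX};
use sp_state_machine::OffchainOverlayedChanges;

fn offchain_overlay_roundtrip() {
    let mut changes = OffchainOverlayedChanges::default();

    // Record a write and a removal under the offchain worker storage prefix.
    changes.set(STORAGE_PREFIX, b"key-a", b"value");
    changes.remove(STORAGE_PREFIX, b"key-b");

    assert_eq!(
        changes.get(STORAGE_PREFIX, b"key-a"),
        Some(OffchainOverlayedChange::SetValue(b"value".to_vec())),
    );
    assert_eq!(
        changes.get(STORAGE_PREFIX, b"key-b"),
        Some(OffchainOverlayedChange::Remove),
    );

    // Draining hands the accumulated `((prefix, key), change)` pairs to the
    // persistent offchain database and leaves the overlay empty.
    assert_eq!(changes.drain().count(), 2);
    assert_eq!(changes.iter().count(), 0);
}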
+ pub fn overlay_mut(&mut self) -> &mut OverlayedMap<(Vec, Vec), OffchainOverlayedChange> { + &mut self.0 + } +} + +#[cfg(test)] +mod test { + use super::*; + use sp_core::offchain::STORAGE_PREFIX; + + #[test] + fn test_drain() { + let mut ooc = OffchainOverlayedChanges::default(); + ooc.set(STORAGE_PREFIX, b"kkk", b"vvv"); + let drained = ooc.drain().count(); + assert_eq!(drained, 1); + let leftover = ooc.iter().count(); + assert_eq!(leftover, 0); + + ooc.set(STORAGE_PREFIX, b"a", b"v"); + ooc.set(STORAGE_PREFIX, b"b", b"v"); + ooc.set(STORAGE_PREFIX, b"c", b"v"); + ooc.set(STORAGE_PREFIX, b"d", b"v"); + ooc.set(STORAGE_PREFIX, b"e", b"v"); + assert_eq!(ooc.iter().count(), 5); + } + + #[test] + fn test_accumulated_set_remove_set() { + let mut ooc = OffchainOverlayedChanges::default(); + ooc.set(STORAGE_PREFIX, b"ppp", b"qqq"); + ooc.remove(STORAGE_PREFIX, b"ppp"); + // keys are equiv, so it will overwrite the value and the overlay will contain + // one item + assert_eq!(ooc.iter().count(), 1); + + ooc.set(STORAGE_PREFIX, b"ppp", b"rrr"); + let mut iter = ooc.into_iter(); + assert_eq!( + iter.next(), + Some( + ((STORAGE_PREFIX.to_vec(), b"ppp".to_vec()), + OffchainOverlayedChange::SetValue(b"rrr".to_vec())) + ) + ); + assert_eq!(iter.next(), None); + } +} diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 0888c561cae30..6b87aa12eb1af 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -204,12 +204,12 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.child_storage(child_info, key) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - self.0.for_keys_in_child_storage(child_info, f) + self.0.apply_to_child_keys_while(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 1b70958145c70..dee7c9e337cda 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -131,7 +131,8 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< fn kill_child_storage( &mut self, _child_info: &ChildInfo, - ) { + _limit: Option, + ) -> bool { unimplemented!("kill_child_storage is not supported in ReadOnlyExternalities") } @@ -155,8 +156,6 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("storage_append is not supported in ReadOnlyExternalities") } - fn chain_id(&self) -> u64 { 42 } - fn storage_root(&mut self) -> Vec { unimplemented!("storage_root is not supported in ReadOnlyExternalities") } diff --git a/primitives/state-machine/src/stats.rs b/primitives/state-machine/src/stats.rs index f84de6a5bad07..9d4ac27e5e94f 100644 --- a/primitives/state-machine/src/stats.rs +++ b/primitives/state-machine/src/stats.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 4dcd308285625..a6f9d06824642 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -33,10 +33,7 @@ use crate::{ use codec::{Decode, Encode}; use hash_db::Hasher; use sp_core::{ - offchain::{ - testing::TestPersistentOffchainDB, - storage::OffchainOverlayedChanges - }, + offchain::testing::TestPersistentOffchainDB, storage::{ well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES, is_child_storage_key}, Storage, @@ -52,7 +49,6 @@ where H::Out: codec::Codec + Ord, { overlay: OverlayedChanges, - offchain_overlay: OffchainOverlayedChanges, offchain_db: TestPersistentOffchainDB, storage_transaction_cache: StorageTransactionCache< as Backend>::Transaction, H, N @@ -71,7 +67,6 @@ impl TestExternalities pub fn ext(&mut self) -> Ext> { Ext::new( &mut self.overlay, - &mut self.offchain_overlay, &mut self.storage_transaction_cache, &self.backend, match self.changes_trie_config.clone() { @@ -109,8 +104,6 @@ impl TestExternalities storage.top.insert(HEAP_PAGES.to_vec(), 8u64.encode()); storage.top.insert(CODE.to_vec(), code.to_vec()); - let offchain_overlay = OffchainOverlayedChanges::enabled(); - let mut extensions = Extensions::default(); extensions.register(TaskExecutorExt::new(TaskExecutor::new())); @@ -118,7 +111,6 @@ impl TestExternalities TestExternalities { overlay, - offchain_overlay, offchain_db, changes_trie_config, extensions, @@ -128,9 +120,14 @@ impl TestExternalities } } + /// Returns the overlayed changes. + pub fn overlayed_changes(&self) -> &OverlayedChanges { + &self.overlay + } + /// Move offchain changes from overlay to the persistent store. pub fn persist_offchain_overlay(&mut self) { - self.offchain_db.apply_offchain_changes(&mut self.offchain_overlay); + self.offchain_db.apply_offchain_changes(self.overlay.offchain_drain_committed()); } /// A shared reference type around the offchain worker storage. 
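With the offchain overlay folded into `OverlayedChanges`, `TestExternalities::persist_offchain_overlay` above now drains the committed offchain changes straight from the overlay into the persistent test offchain database. A hedged sketch of that flow; it assumes `Externalities::set_offchain_storage` (the existing offchain-indexing entry point) is what feeds the overlay's offchain change set.

use sp_core::traits::Externalities;
use sp_runtime::traits::BlakeTwo256;
use sp_state_machine::TestExternalities;

fn persist_offchain_writes() {
    let mut t = TestExternalities::<BlakeTwo256>::default();

    {
        let mut ext = t.ext();
        // Assumed entry point: records into the overlay's offchain change set.
        ext.set_offchain_storage(b"key", Some(&b"value"[..]));
    }

    // Move the committed offchain changes from the overlay into the
    // persistent test offchain database.
    t.persist_offchain_overlay();
}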
@@ -153,8 +150,11 @@ impl TestExternalities &mut self.changes_trie_storage } - /// Return a new backend with all pending value. - pub fn commit_all(&self) -> InMemoryBackend { + /// Return a new backend with all pending changes. + /// + /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open + /// transactions. + fn as_backend(&self) -> InMemoryBackend { let top: Vec<_> = self.overlay.changes() .map(|(k, v)| (k.clone(), v.value().cloned())) .collect(); @@ -172,6 +172,23 @@ impl TestExternalities self.backend.update(transaction) } + /// Commit all pending changes to the underlying backend. + /// + /// # Panic + /// + /// This will panic if there are still open transactions. + pub fn commit_all(&mut self) -> Result<(), String> { + let changes = self.overlay.drain_storage_changes::<_, _, N>( + &self.backend, + None, + Default::default(), + &mut Default::default(), + )?; + + self.backend.apply_transaction(changes.transaction_storage_root, changes.transaction); + Ok(()) + } + /// Execute the given closure while `self` is set as externalities. /// /// Returns the result of the given closure. @@ -209,7 +226,7 @@ impl PartialEq for TestExternalities /// This doesn't test if they are in the same state, only if they contains the /// same data at this state fn eq(&self, other: &TestExternalities) -> bool { - self.commit_all().eq(&other.commit_all()) + self.as_backend().eq(&other.as_backend()) } } @@ -258,7 +275,7 @@ impl sp_externalities::ExtensionStore for TestExternalities where #[cfg(test)] mod tests { use super::*; - use sp_core::{H256, traits::Externalities}; + use sp_core::{H256, traits::Externalities, storage::ChildInfo}; use sp_runtime::traits::BlakeTwo256; use hex_literal::hex; @@ -289,4 +306,45 @@ mod tests { fn assert_send() {} assert_send::>(); } + + #[test] + fn commit_all_and_kill_child_storage() { + let mut ext = TestExternalities::::default(); + let child_info = ChildInfo::new_default(&b"test_child"[..]); + + { + let mut ext = ext.ext(); + ext.place_child_storage(&child_info, b"doe".to_vec(), Some(b"reindeer".to_vec())); + ext.place_child_storage(&child_info, b"dog".to_vec(), Some(b"puppy".to_vec())); + ext.place_child_storage(&child_info, b"dog2".to_vec(), Some(b"puppy2".to_vec())); + } + + ext.commit_all().unwrap(); + + { + let mut ext = ext.ext(); + + assert!(!ext.kill_child_storage(&child_info, Some(2)), "Should not delete all keys"); + + assert!(ext.child_storage(&child_info, &b"doe"[..]).is_none()); + assert!(ext.child_storage(&child_info, &b"dog"[..]).is_none()); + assert!(ext.child_storage(&child_info, &b"dog2"[..]).is_some()); + } + } + + #[test] + fn as_backend_generates_same_backend_as_commit_all() { + let mut ext = TestExternalities::::default(); + { + let mut ext = ext.ext(); + ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); + ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); + ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); + } + + let backend = ext.as_backend(); + + ext.commit_all().unwrap(); + assert!(ext.backend.eq(&backend), "Both backend should be equal."); + } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 4eaa0870baed0..3e74f2d3df4b8 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -113,12 +113,12 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.for_key_values_with_prefix(prefix, f) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - self.essence.for_keys_in_child_storage(child_info, f) + self.essence.apply_to_child_keys_while(child_info, f) } fn for_child_keys_with_prefix( diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 37bbbb7cf9822..c085099da77d8 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -190,7 +190,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Retrieve all entries keys of child storage and call `f` for each of those keys. - pub fn for_keys_in_child_storage( + /// Aborts as soon as `f` returns false. + pub fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, diff --git a/primitives/std/Cargo.toml b/primitives/std/Cargo.toml index 5b988cabc150a..bafa1ea7ef414 100644 --- a/primitives/std/Cargo.toml +++ b/primitives/std/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-std" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/std/src/lib.rs b/primitives/std/src/lib.rs index b323c43720da1..6acf4b75967ae 100644 --- a/primitives/std/src/lib.rs +++ b/primitives/std/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/std/with_std.rs b/primitives/std/with_std.rs index 92e804b27e1d0..b044eb2912274 100644 --- a/primitives/std/with_std.rs +++ b/primitives/std/with_std.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/std/without_std.rs b/primitives/std/without_std.rs index 3c130d547a1e4..697a0787e5312 100755 --- a/primitives/std/without_std.rs +++ b/primitives/std/without_std.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 4f14ba38f2147..7a984d9205690 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-storage" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" description = "Storage related primitives" @@ -14,12 +14,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.3.1", optional = true } ref-cast = "1.0.0" -sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +sp-debug-derive = { version = "3.0.0", path = "../debug-derive" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index b253733e7b29e..1e9f9766072e4 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -180,6 +180,16 @@ pub mod well_known_keys { // Other code might depend on this, so be careful changing this. key.starts_with(CHILD_STORAGE_KEY_PREFIX) } + + /// Returns if the given `key` starts with [`CHILD_STORAGE_KEY_PREFIX`] or collides with it. + pub fn starts_with_child_storage_key(key: &[u8]) -> bool { + if key.len() > CHILD_STORAGE_KEY_PREFIX.len() { + key.starts_with(CHILD_STORAGE_KEY_PREFIX) + } else { + CHILD_STORAGE_KEY_PREFIX.starts_with(key) + } + } + } /// Information related to a child state. 
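The new `starts_with_child_storage_key` above treats a key as colliding with the child-storage namespace either when it starts with `CHILD_STORAGE_KEY_PREFIX` or when it is itself a prefix of that constant. A few illustrative checks that follow directly from the definition:

use sp_core::storage::well_known_keys::{
    starts_with_child_storage_key, CHILD_STORAGE_KEY_PREFIX,
};

fn child_key_collision_checks() {
    // A full child storage key starts with the reserved prefix.
    let full_key = [CHILD_STORAGE_KEY_PREFIX, &b"default:my_child"[..]].concat();
    assert!(starts_with_child_storage_key(&full_key));

    // A key shorter than the prefix collides when it is itself a prefix of it.
    assert!(starts_with_child_storage_key(&CHILD_STORAGE_KEY_PREFIX[..2]));

    // Unrelated keys are not affected.
    assert!(!starts_with_child_storage_key(b"some unrelated storage key"));
}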
diff --git a/primitives/tasks/Cargo.toml b/primitives/tasks/Cargo.toml index 0c0f410824c81..0a361b6c8dbbc 100644 --- a/primitives/tasks/Cargo.toml +++ b/primitives/tasks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-tasks" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,14 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { version = "0.4.8", optional = true } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } -sp-io = { version = "2.0.0", default-features = false, path = "../io" } -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../runtime-interface" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } +sp-externalities = { version = "0.9.0", optional = true, path = "../externalities" } +sp-io = { version = "3.0.0", default-features = false, path = "../io" } +sp-runtime-interface = { version = "3.0.0", default-features = false, path = "../runtime-interface" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } [dev-dependencies] -codec = { package = "parity-scale-codec", default-features = false, version = "1.3.1" } +codec = { package = "parity-scale-codec", default-features = false, version = "2.0.0" } [features] default = ["std"] diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 8994d069e4c76..249222ec71c3d 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -118,7 +118,8 @@ impl Externalities for AsyncExternalities { fn kill_child_storage( &mut self, _child_info: &ChildInfo, - ) { + _limit: Option, + ) -> bool { panic!("`kill_child_storage`: should not be used in async externalities!") } @@ -142,8 +143,6 @@ impl Externalities for AsyncExternalities { panic!("`storage_append`: should not be used in async externalities!") } - fn chain_id(&self) -> u64 { 42 } - fn storage_root(&mut self) -> Vec { panic!("`storage_root`: should not be used in async externalities!") } diff --git a/primitives/tasks/src/lib.rs b/primitives/tasks/src/lib.rs index 030e178109d7b..96aca0e1cef6b 100644 --- a/primitives/tasks/src/lib.rs +++ b/primitives/tasks/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 18468a33ae42e..fbf29db96fa46 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -12,12 +12,12 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../application-crypto" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } [features] default = [ diff --git a/primitives/test-primitives/src/lib.rs b/primitives/test-primitives/src/lib.rs index 27c7ec5e10e65..ed408f338e49a 100644 --- a/primitives/test-primitives/src/lib.rs +++ b/primitives/test-primitives/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 79dae29102220..53fb37d4deb43 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-timestamp" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,12 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } -impl-trait-for-tuples = "0.1.3" +sp-api = { version = "3.0.0", default-features = false, path = "../api" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "3.0.0", default-features = false, path = "../inherents" } +impl-trait-for-tuples = "0.2.1" wasm-timer = { version = "0.2", optional = true } [features] diff --git a/primitives/timestamp/src/lib.rs b/primitives/timestamp/src/lib.rs index 89bfcc20e0e6d..59f792678c4be 100644 --- a/primitives/timestamp/src/lib.rs +++ b/primitives/timestamp/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index 1000952b39fd4..13804353eca76 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-tracing" -version = "2.0.0" +version = "3.0.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -18,12 +18,12 @@ features = ["with-tracing"] targets = ["x86_64-unknown-linux-gnu", "wasm32-unknown-unknown"] [dependencies] -sp-std = { version = "2.0.0", path = "../std", default-features = false} -codec = { version = "1.3.1", package = "parity-scale-codec", default-features = false, features = ["derive"]} -tracing = { version = "0.1.21", default-features = false } +sp-std = { version = "3.0.0", path = "../std", default-features = false} +codec = { version = "2.0.0", package = "parity-scale-codec", default-features = false, features = ["derive"]} +tracing = { version = "0.1.22", default-features = false } tracing-core = { version = "0.1.17", default-features = false } log = { version = "0.4.8", optional = true } -tracing-subscriber = { version = "0.2.10", optional = true, features = ["tracing-log"] } +tracing-subscriber = { version = "0.2.15", optional = true, features = ["tracing-log"] } [features] default = [ "std" ] diff --git a/primitives/tracing/src/lib.rs b/primitives/tracing/src/lib.rs index cb67d8a0c5a22..227e1ee994ecb 100644 --- a/primitives/tracing/src/lib.rs +++ b/primitives/tracing/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -164,7 +164,7 @@ macro_rules! within_span { $( $code:tt )* ) => { { - $crate::within_span!($crate::span!($crate::Level::TRACE, $name); $( $code )*) + $crate::within_span!($crate::span!($lvl, $name); $( $code )*) } }; } @@ -233,6 +233,6 @@ macro_rules! enter_span { let __tracing_guard__ = __within_span__.enter(); }; ( $lvl:expr, $name:expr ) => { - $crate::enter_span!($crate::span!($crate::Level::TRACE, $name)) + $crate::enter_span!($crate::span!($lvl, $name)) }; } diff --git a/primitives/tracing/src/types.rs b/primitives/tracing/src/types.rs index 050ac4c314166..725565c371842 100644 --- a/primitives/tracing/src/types.rs +++ b/primitives/tracing/src/types.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index 57ba3a28ac3c1..d431e444d457e 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-transaction-pool" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,15 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", optional = true } -derive_more = { version = "0.99.2", optional = true } +thiserror = { version = "1.0.21", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", optional = true } +derive_more = { version = "0.99.11", optional = true } futures = { version = "0.3.1", optional = true } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", features = ["derive"], optional = true} -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-blockchain = { version = "2.0.0", optional = true, path = "../blockchain" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-api = { version = "3.0.0", default-features = false, path = "../api" } +sp-blockchain = { version = "3.0.0", optional = true, path = "../blockchain" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } [features] default = [ "std" ] @@ -31,6 +32,7 @@ std = [ "futures", "log", "serde", + "thiserror", "sp-api/std", "sp-blockchain", "sp-runtime/std", diff --git a/primitives/transaction-pool/src/error.rs b/primitives/transaction-pool/src/error.rs index 531b397cb946c..dd2d6401c1821 100644 --- a/primitives/transaction-pool/src/error.rs +++ b/primitives/transaction-pool/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,49 +25,52 @@ use sp_runtime::transaction_validity::{ pub type Result = std::result::Result; /// Transaction pool error type. 
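The transaction pool error enum that follows is migrated from `derive_more::Display` plus a manual `std::error::Error` impl to `thiserror`. The same pattern on a toy enum (not the actual `sp-transaction-pool` type), to show what the derive provides: `#[error("...")]` generates `Display`, and `thiserror::Error` supplies the `std::error::Error` impl, so the manual blanket impl can be dropped.

// Toy enum, illustrative only.
#[derive(Debug, thiserror::Error)]
pub enum PoolErrorLike {
    #[error("Transaction temporarily banned")]
    TemporarilyBanned,

    #[error("Too low priority ({old} > {new})")]
    TooLowPriority { old: u64, new: u64 },
}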
-#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error, derive_more::From)] +#[allow(missing_docs)] pub enum Error { - /// Transaction is not verifiable yet, but might be in the future. - #[display(fmt="Unknown transaction validity: {:?}", _0)] + #[error("Unknown transaction validity: {0:?}")] UnknownTransaction(UnknownTransaction), - /// Transaction is invalid. - #[display(fmt="Invalid transaction validity: {:?}", _0)] + + #[error("Invalid transaction validity: {0:?}")] InvalidTransaction(InvalidTransaction), + /// The transaction validity returned no "provides" tag. /// /// Such transactions are not accepted to the pool, since we use those tags /// to define identity of transactions (occupance of the same "slot"). - #[display(fmt="The transaction does not provide any tags, so the pool can't identify it.")] + #[error("Transaction does not provide any tags, so the pool can't identify it")] NoTagsProvided, - /// The transaction is temporarily banned. - #[display(fmt="Temporarily Banned")] + + #[error("Transaction temporarily Banned")] TemporarilyBanned, - /// The transaction is already in the pool. - #[display(fmt="[{:?}] Already imported", _0)] + + #[error("[{0:?}] Already imported")] AlreadyImported(Box), - /// The transaction cannot be imported cause it's a replacement and has too low priority. - #[display(fmt="Too low priority ({} > {})", old, new)] + + #[error("Too low priority ({} > {})", old, new)] TooLowPriority { /// Transaction already in the pool. old: Priority, /// Transaction entering the pool. new: Priority }, - /// Deps cycle detected and we couldn't import transaction. - #[display(fmt="Cycle Detected")] + #[error("Transaction with cyclic dependency")] CycleDetected, - /// Transaction was dropped immediately after it got inserted. - #[display(fmt="Transaction couldn't enter the pool because of the limit.")] + + #[error("Transaction couldn't enter the pool because of the limit")] ImmediatelyDropped, - /// Invalid block id. + + #[error("Transaction cannot be propagated and the local node does not author blocks")] + Unactionable, + + #[from(ignore)] + #[error("{0}")] InvalidBlockId(String), - /// The pool is not accepting future transactions. - #[display(fmt="The pool is not accepting future transactions")] + + #[error("The pool is not accepting future transactions")] RejectedFutureTransaction, } -impl std::error::Error for Error {} - /// Transaction pool error conversion. pub trait IntoPoolError: std::error::Error + Send + Sized { /// Try to extract original `Error` diff --git a/primitives/transaction-pool/src/lib.rs b/primitives/transaction-pool/src/lib.rs index b991c541521c2..276c53443eb71 100644 --- a/primitives/transaction-pool/src/lib.rs +++ b/primitives/transaction-pool/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/transaction-pool/src/pool.rs b/primitives/transaction-pool/src/pool.rs index 6235ca7cdfcf3..b0964cab2d18e 100644 --- a/primitives/transaction-pool/src/pool.rs +++ b/primitives/transaction-pool/src/pool.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
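The error type above moves from `derive_more::Display` plus a manual `impl std::error::Error` to `thiserror::Error`, and gains the `Unactionable` variant. A minimal sketch of the same migration pattern (`PoolError` here is a stand-in, not the real `sp-transaction-pool` type):

    use thiserror::Error;

    #[derive(Debug, Error)]
    pub enum PoolError {
        // The display string moves from `#[display(fmt = "...")]` into `#[error("...")]`.
        #[error("Transaction temporarily banned")]
        TemporarilyBanned,

        // Named fields can be interpolated directly in the error message.
        #[error("Too low priority ({old} > {new})")]
        TooLowPriority { old: u64, new: u64 },
    }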
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/transaction-pool/src/runtime_api.rs b/primitives/transaction-pool/src/runtime_api.rs index 9080c023f5890..e1c3280ca2aaf 100644 --- a/primitives/transaction-pool/src/runtime_api.rs +++ b/primitives/transaction-pool/src/runtime_api.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 7b7629bbf9bb2..4396550a48a8f 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-trie" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] description = "Patricia trie stuff using a parity-scale-codec node format" repository = "https://github.com/paritytech/substrate/" @@ -18,20 +18,20 @@ name = "bench" harness = false [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.22.0", default-features = false } +trie-db = { version = "0.22.2", default-features = false } trie-root = { version = "0.16.0", default-features = false } -memory-db = { version = "0.24.0", default-features = false } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } +memory-db = { version = "0.26.0", default-features = false } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } [dev-dependencies] -trie-bench = "0.25.0" +trie-bench = "0.27.0" trie-standardmap = "0.15.2" criterion = "0.3.3" hex-literal = "0.3.1" -sp-runtime = { version = "2.0.0", path = "../runtime" } +sp-runtime = { version = "3.0.0", path = "../runtime" } [features] default = ["std"] diff --git a/primitives/trie/benches/bench.rs b/primitives/trie/benches/bench.rs index d385b4bacd4c0..c2ccb31328aae 100644 --- a/primitives/trie/benches/bench.rs +++ b/primitives/trie/benches/bench.rs @@ -1,18 +1,19 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use criterion::{Criterion, criterion_group, criterion_main}; criterion_group!(benches, benchmark); diff --git a/primitives/trie/src/error.rs b/primitives/trie/src/error.rs index 2d3a1b79287c3..8e1d9b974ffd5 100644 --- a/primitives/trie/src/error.rs +++ b/primitives/trie/src/error.rs @@ -1,10 +1,19 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 // -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #[cfg(feature="std")] use std::fmt; @@ -40,7 +49,7 @@ impl StdError for Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Error::Decode(e) => write!(f, "Decode error: {}", e.what()), + Error::Decode(e) => write!(f, "Decode error: {}", e), Error::BadFormat => write!(f, "Bad format"), } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 73a4a8029b2d7..572283f1c027e 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -1,18 +1,19 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
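In the `sp-trie` error hunk above, the decode error is now formatted directly: with parity-scale-codec 2.x the codec `Error` type implements `Display` (and `std::error::Error` under `std`), replacing the old `what()` accessor. A minimal sketch, assuming codec 2.x:

    use codec::Decode;

    // Decode a SCALE-encoded u32, turning any codec error into a readable string
    // via its `Display` impl.
    fn decode_u32(mut bytes: &[u8]) -> Result<u32, String> {
        u32::decode(&mut bytes).map_err(|e| format!("Decode error: {}", e))
    }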
+// See the License for the specific language governing permissions and +// limitations under the License. //! Utility functions to interact with Substrate's Base-16 Modified Merkle Patricia tree ("trie"). @@ -184,7 +185,7 @@ pub fn delta_trie_root( DB: hash_db::HashDB, { { - let mut trie = TrieDBMut::::from_existing(&mut *db, &mut root)?; + let mut trie = TrieDBMut::::from_existing(db, &mut root)?; let mut delta = delta.into_iter().collect::>(); delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow())); @@ -223,9 +224,13 @@ pub fn read_trie_value_with< Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) } +/// Determine the empty trie root. +pub fn empty_trie_root() -> ::Out { + L::trie_root::<_, Vec, Vec>(core::iter::empty()) +} + /// Determine the empty child trie root. -pub fn empty_child_trie_root( -) -> ::Out { +pub fn empty_child_trie_root() -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } @@ -271,7 +276,8 @@ pub fn child_delta_trie_root( } /// Call `f` for all keys in a child trie. -pub fn for_keys_in_child_trie( +/// Aborts as soon as `f` returns false. +pub fn for_keys_in_child_trie bool, DB>( keyspace: &[u8], db: &DB, root_slice: &[u8], @@ -290,7 +296,9 @@ pub fn for_keys_in_child_trie( for x in iter { let (key, _) = x?; - f(&key); + if !f(&key) { + break; + } } Ok(()) diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 8a61f372cf2aa..0c923ff024c55 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -1,18 +1,19 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! `NodeCodec` implementation for Substrate's trie format. diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 7aa16292549ed..0fdf6fefbd0bc 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -1,18 +1,19 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
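In the `sp-trie` changes above, `for_keys_in_child_trie` now takes an `FnMut(&[u8]) -> bool` callback and aborts as soon as the callback returns `false` (and a plain `empty_trie_root` helper joins `empty_child_trie_root`). A minimal sketch of the new callback contract; the hypothetical `visit_keys` stands in for the real function, which additionally needs a backing `HashDB` and a root:

    // Walks `keys`, stopping as soon as the callback asks to abort.
    fn visit_keys(keys: &[Vec<u8>], mut f: impl FnMut(&[u8]) -> bool) {
        for key in keys {
            if !f(key) {
                break;
            }
        }
    }

    // Usage: collect at most `n` keys, then stop the walk early.
    fn first_n_keys(keys: &[Vec<u8>], n: usize) -> Vec<Vec<u8>> {
        let mut out = Vec::new();
        visit_keys(keys, |k| {
            if out.len() == n {
                return false;
            }
            out.push(k.to_vec());
            true
        });
        out
    }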
+// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! The node header. @@ -37,7 +38,7 @@ pub(crate) enum NodeKind { } impl Encode for NodeHeader { - fn encode_to(&self, output: &mut T) { + fn encode_to(&self, output: &mut T) { match self { NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), NodeHeader::Branch(true, nibble_count) => @@ -98,7 +99,7 @@ pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8) -> impl Iterator } /// Encodes size and prefix to a stream output. -fn encode_size_and_prefix(size: usize, prefix: u8, out: &mut impl Output) { +fn encode_size_and_prefix(size: usize, prefix: u8, out: &mut W) { for b in size_and_prefix_iterator(size, prefix) { out.push_byte(b) } diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 254adc2fcb48a..f0b2bfd4bc3d3 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -1,18 +1,19 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
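The `node_header.rs` hunk above tracks parity-scale-codec 2.0: `Encode::encode_to` is now generic over `T: Output + ?Sized`, so manual `Encode` impls need the `?Sized` bound and unsized sinks such as `dyn Output` are accepted. A minimal sketch, assuming codec 2.x (`Flag` is illustrative):

    use codec::{Encode, Output};

    struct Flag(bool);

    impl Encode for Flag {
        // Note the `?Sized` bound required by the new trait signature.
        fn encode_to<T: Output + ?Sized>(&self, dest: &mut T) {
            dest.push_byte(self.0 as u8);
        }
    }

    fn main() {
        assert_eq!(Flag(true).encode(), vec![1u8]);
    }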
use sp_std::vec::Vec; use codec::{Encode, Decode}; diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 0c92e673aae93..3a65c5a9190b4 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -1,18 +1,19 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! `TrieStream` implementation for Substrate's trie format. diff --git a/primitives/utils/Cargo.toml b/primitives/utils/Cargo.toml index 80329d2e59ea9..7669cee346d0b 100644 --- a/primitives/utils/Cargo.toml +++ b/primitives/utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-utils" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -10,10 +10,10 @@ description = "I/O for Substrate runtimes" readme = "README.md" [dependencies] -futures = "0.3.4" +futures = "0.3.9" futures-core = "0.3.4" lazy_static = "1.4.0" -prometheus = { version = "0.10.0", default-features = false } +prometheus = { version = "0.11.0", default-features = false } futures-timer = "3.0.2" [features] diff --git a/primitives/utils/src/lib.rs b/primitives/utils/src/lib.rs index 77bcd096561b4..430ec1ecb6f6c 100644 --- a/primitives/utils/src/lib.rs +++ b/primitives/utils/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/utils/src/metrics.rs b/primitives/utils/src/metrics.rs index a66589b5927fe..45d68ae4e6f70 100644 --- a/primitives/utils/src/metrics.rs +++ b/primitives/utils/src/metrics.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/utils/src/mpsc.rs b/primitives/utils/src/mpsc.rs index 70baa006bdcdc..b033a5527d84a 100644 --- a/primitives/utils/src/mpsc.rs +++ b/primitives/utils/src/mpsc.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -63,7 +63,7 @@ mod inner { /// `UNBOUNDED_CHANNELS_COUNTER` pub fn tracing_unbounded(key: &'static str) ->(TracingUnboundedSender, TracingUnboundedReceiver) { let (s, r) = mpsc::unbounded(); - (TracingUnboundedSender(key.clone(), s), TracingUnboundedReceiver(key,r)) + (TracingUnboundedSender(key, s), TracingUnboundedReceiver(key,r)) } impl TracingUnboundedSender { diff --git a/primitives/utils/src/status_sinks.rs b/primitives/utils/src/status_sinks.rs index 65a560af4eaa5..dc8115670de1e 100644 --- a/primitives/utils/src/status_sinks.rs +++ b/primitives/utils/src/status_sinks.rs @@ -1,18 +1,19 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use futures::{prelude::*, lock::Mutex}; @@ -43,6 +44,12 @@ struct YieldAfter { sender: Option>, } +impl Default for StatusSinks { + fn default() -> Self { + Self::new() + } +} + impl StatusSinks { /// Builds a new empty collection. 
pub fn new() -> StatusSinks { diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index e9475846246ee..bfb9a742c8689 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-version" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] impl-serde = { version = "0.3.1", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 133d0497a2584..24a1b85ed0c38 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index a85b6cd1d118a..1721df4a86685 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-wasm-interface" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,9 +15,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] wasmi = { version = "0.6.2", optional = true } -impl-trait-for-tuples = "0.1.2" -sp-std = { version = "2.0.0", path = "../std", default-features = false } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +impl-trait-for-tuples = "0.2.1" +sp-std = { version = "3.0.0", path = "../std", default-features = false } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] diff --git a/primitives/wasm-interface/src/lib.rs b/primitives/wasm-interface/src/lib.rs index c432a966056c5..fd200268473b0 100644 --- a/primitives/wasm-interface/src/lib.rs +++ b/primitives/wasm-interface/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/wasm-interface/src/wasmi_impl.rs b/primitives/wasm-interface/src/wasmi_impl.rs index 5931671c97ed4..79110487ffca5 100644 --- a/primitives/wasm-interface/src/wasmi_impl.rs +++ b/primitives/wasm-interface/src/wasmi_impl.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
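The `status_sinks` hunk above adds `impl Default for StatusSinks`, delegating to the existing `new()` constructor (and `tracing_unbounded` drops a needless `clone()` of its `&'static str` key). A minimal sketch of the same Default-delegates-to-new pattern, which also satisfies clippy's `new_without_default` lint:

    struct Registry {
        entries: Vec<String>,
    }

    impl Registry {
        fn new() -> Self {
            Registry { entries: Vec::new() }
        }
    }

    // `Default` simply forwards to `new()`, so `Registry::default()` and
    // `#[derive(Default)]` on containing types keep working.
    impl Default for Registry {
        fn default() -> Self {
            Self::new()
        }
    }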
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/ss58-registry.json b/ss58-registry.json index d4286145a1fb8..cae6577e2157c 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -21,8 +21,8 @@ }, { "prefix": 1, - "network": "reserved1", - "displayName": "This prefix is reserved.", + "network": null, + "displayName": "Bare 32-bit Schnorr/Ristretto (S/R 25519) public key.", "symbols": null, "decimals": null, "standardAccount": null, @@ -39,8 +39,8 @@ }, { "prefix": 3, - "network": "reserved3", - "displayName": "This prefix is reserved.", + "network": null, + "displayName": "Bare 32-bit Ed25519 public key.", "symbols": null, "decimals": null, "standardAccount": null, @@ -120,12 +120,12 @@ }, { "prefix": 12, - "network": "polymath", - "displayName": "Polymath", - "symbols": null, - "decimals": null, + "network": "polymesh", + "displayName": "Polymesh", + "symbols": ["POLYX"], + "decimals": [6], "standardAccount": "*25519", - "website": null + "website": "https://polymath.network/" }, { "prefix": 13, @@ -145,6 +145,15 @@ "standardAccount": "*25519", "website": "https://totemaccounting.com" }, + { + "prefix": 15, + "network": "synesthesia", + "displayName": "Synesthesia", + "symbols": ["SYN"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://synesthesia.network/" + }, { "prefix": 16, "network": "kulupu", @@ -185,10 +194,10 @@ "prefix": 20, "network": "stafi", "displayName": "Stafi", - "symbols": null, - "decimals": null, + "symbols": ["FIS"], + "decimals": [12], "standardAccount": "*25519", - "website": null + "website": "https://stafi.io" }, { "prefix": 21, @@ -235,6 +244,24 @@ "standardAccount": "*25519", "website": "https://zero.io" }, + { + "prefix": 26, + "network": "jupiter", + "displayName": "Jupiter", + "symbols": ["jDOT"], + "decimals": [10], + "standardAccount": "*25519", + "website": "https://jupiter.patract.io" + }, + { + "prefix": 27, + "network": "patract", + "displayName": "Patract", + "symbols": ["pDOT", "pKSM"], + "decimals": [10, 12], + "standardAccount": "*25519", + "website": "https://patract.network" + }, { "prefix": 28, "network": "subsocial", @@ -244,23 +271,41 @@ "standardAccount": "*25519", "website": null }, + { + "prefix": 29, + "network": "cord", + "displayName": "Dhiway CORD Network", + "symbols": ["DCU"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://dhiway.com/" + }, { "prefix": 30, "network": "phala", "displayName": "Phala Network", - "symbols": null, - "decimals": null, + "symbols": ["PHA"], + "decimals": [12], "standardAccount": "*25519", - "website": null + "website": "https://phala.network" + }, + { + "prefix": 31, + "network": "litentry", + "displayName": "Litentry Network", + "symbols": ["LIT"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://litentry.com/" }, { "prefix": 32, "network": "robonomics", - "displayName": "Robonomics Network", - "symbols": null, - "decimals": null, + "displayName": "Robonomics", + "symbols": ["XRT"], + "decimals": [9], "standardAccount": "*25519", - "website": null + "website": "https://robonomics.network" }, { "prefix": 33, @@ -271,6 +316,24 @@ "standardAccount": "*25519", "website": null }, + { + "prefix": 34, + "network": "ares", + "displayName": "Ares Protocol", + "symbols": ["ARES"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://www.aresprotocol.com/" + }, + { + "prefix": 35, + "network": "vln", + "displayName": "Valiu Liquidity Network", + 
"symbols": ["USDv"], + "decimals": [15], + "standardAccount": "*25519", + "website": "https://valiu.com/" + }, { "prefix": 36, "network": "centrifuge", @@ -289,6 +352,15 @@ "standardAccount": "*25519", "website": "https://nodle.io/" }, + { + "prefix": 38, + "network": "kilt", + "displayName": "KILT Chain", + "symbols": ["KILT"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://kilt.io/" + }, { "prefix": 39, "network": "mathchain", @@ -307,6 +379,15 @@ "standardAccount": "*25519", "website": "https://mathwallet.org" }, + { + "prefix": 41, + "network": "poli", + "displayName": "Polimec Chain", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": "https://polimec.io/" + }, { "prefix": 42, "network": "substrate", @@ -318,8 +399,8 @@ }, { "prefix": 43, - "network": "reserved43", - "displayName": "This prefix is reserved.", + "network": null, + "displayName": "Bare 32-bit ECDSA SECP-256k1 public key.", "symbols": null, "decimals": null, "standardAccount": null, @@ -334,6 +415,15 @@ "standardAccount": "*25519", "website": "https://chainx.org/" }, + { + "prefix": 45, + "network": "uniarts", + "displayName": "UniArts Network", + "symbols": ["UART", "UINK"], + "decimals": [12, 12], + "standardAccount": "*25519", + "website": "https://uniarts.me" + }, { "prefix": 46, "network": "reserved46", @@ -353,13 +443,22 @@ "website": null }, { - "prefix": 48, - "network": "reserved48", - "displayName": "All prefixes 48 and higher are reserved and cannot be allocated.", - "symbols": null, - "decimals": null, - "standardAccount": null, - "website": null + "prefix": 65, + "network": "aventus", + "displayName": "AvN Mainnet", + "symbols": ["AVT"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://aventus.io" + }, + { + "prefix": 66, + "network": "crust", + "displayName": "Crust Network", + "symbols": ["CRU"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://crust.network" } ] } diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index ddadc2cb7177d..0a8849fe98a72 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-utils" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,9 +13,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = { version = "0.3.1", features = ["compat"] } -substrate-test-utils-derive = { version = "0.8.0", path = "./derive" } +substrate-test-utils-derive = { version = "0.9.0", path = "./derive" } tokio = { version = "0.2.13", features = ["macros"] } [dev-dependencies] -sc-service = { version = "0.8.0", path = "../client/service" } -trybuild = { version = "1.0", features = ["diff"] } +sc-service = { version = "0.9.0", path = "../client/service" } +trybuild = { version = "1.0.38", features = [ "diff" ] } diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 07d28660f6188..1f62e32ddf596 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-client" -version = "2.0.0" +version = "2.0.1" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,23 +12,23 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } -futures = "0.3.4" +codec = { package = "parity-scale-codec", version = "2.0.0" } +futures = "0.3.9" futures01 = { package = "futures", version = 
"0.1.29" } hash-db = "0.15.2" hex = "0.4" serde = "1.0.55" serde_json = "1.0.55" -sc-client-api = { version = "2.0.0", path = "../../client/api" } -sc-client-db = { version = "0.8.0", features = ["test-helpers"], path = "../../client/db" } -sc-consensus = { version = "0.8.0", path = "../../client/consensus/common" } -sc-executor = { version = "0.8.0", path = "../../client/executor" } -sc-light = { version = "2.0.0", path = "../../client/light" } -sc-service = { version = "0.8.0", default-features = false, features = ["test-helpers"], path = "../../client/service" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sc-client-api = { version = "3.0.0", path = "../../client/api" } +sc-client-db = { version = "0.9.0", features = ["test-helpers"], path = "../../client/db" } +sc-consensus = { version = "0.9.0", path = "../../client/consensus/common" } +sc-executor = { version = "0.9.0", path = "../../client/executor" } +sc-light = { version = "3.0.0", path = "../../client/light" } +sc-service = { version = "0.9.0", default-features = false, features = ["test-helpers"], path = "../../client/service" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-keystore = { version = "0.9.0", path = "../../primitives/keystore" } +sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index 43e89c8f10bc1..db3e42f7e01c2 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index bde2156e0cc88..bf5b2b6a04143 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -79,6 +79,7 @@ pub struct TestClientBuilder { keystore: Option, fork_blocks: ForkBlocks, bad_blocks: BadBlocks, + enable_offchain_indexing_api: bool, } impl Default @@ -114,6 +115,7 @@ impl TestClientBuilder TestClientBuilder Self { + self.enable_offchain_indexing_api = true; + self + } + /// Build the test client with the given native executor. 
pub fn build_with_executor( self, @@ -219,7 +227,10 @@ impl TestClientBuilder"] edition = "2018" license = "Apache-2.0" @@ -10,7 +10,7 @@ description = "Substrate test utilities macros" [dependencies] quote = "1.0.6" -syn = { version = "1.0.33", features = ["full"] } +syn = { version = "1.0.58", features = ["full"] } proc-macro-crate = "0.1.4" [lib] diff --git a/test-utils/derive/src/lib.rs b/test-utils/derive/src/lib.rs index f5d627068963f..7a9954d21d82e 100644 --- a/test-utils/derive/src/lib.rs +++ b/test-utils/derive/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index cb6147adf25c6..1a841ac0755a4 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -13,50 +13,50 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/aura" } -sp-consensus-babe = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/babe" } -sp-block-builder = { version = "2.0.0", default-features = false, path = "../../primitives/block-builder" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -frame-executive = { version = "2.0.0", default-features = false, path = "../../frame/executive" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } -memory-db = { version = "0.24.0", default-features = false } -sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0"} -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "2.0.0"} -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0", default-features = false, path = "../../frame/support" } -sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } -sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } -sp-api = { version = "2.0.0", default-features = false, path = "../../primitives/api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -pallet-babe = { version = "2.0.0", default-features = false, path = "../../frame/babe" } -frame-system = { version = "2.0.0", default-features = false, path = "../../frame/system" } -frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../frame/system/rpc/runtime-api" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../frame/timestamp" } -sp-finality-grandpa = { version = "2.0.0", default-features = false, path = "../../primitives/finality-grandpa" } -sp-trie = { version = "2.0.0", 
default-features = false, path = "../../primitives/trie" } -sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../primitives/transaction-pool" } -trie-db = { version = "0.22.0", default-features = false } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -sc-service = { version = "0.8.0", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } -sp-state-machine = { version = "0.8.0", default-features = false, path = "../../primitives/state-machine" } -sp-externalities = { version = "0.8.0", default-features = false, path = "../../primitives/externalities" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.9.0", default-features = false, path = "../../primitives/consensus/aura" } +sp-consensus-babe = { version = "0.9.0", default-features = false, path = "../../primitives/consensus/babe" } +sp-block-builder = { version = "3.0.0", default-features = false, path = "../../primitives/block-builder" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +frame-executive = { version = "3.0.0", default-features = false, path = "../../frame/executive" } +sp-inherents = { version = "3.0.0", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "3.0.0", optional = true, path = "../../primitives/keyring" } +memory-db = { version = "0.26.0", default-features = false } +sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "3.0.0"} +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "3.0.0"} +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +frame-support = { version = "3.0.0", default-features = false, path = "../../frame/support" } +sp-version = { version = "3.0.0", default-features = false, path = "../../primitives/version" } +sp-session = { version = "3.0.0", default-features = false, path = "../../primitives/session" } +sp-api = { version = "3.0.0", default-features = false, path = "../../primitives/api" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +pallet-babe = { version = "3.0.0", default-features = false, path = "../../frame/babe" } +frame-system = { version = "3.0.0", default-features = false, path = "../../frame/system" } +frame-system-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../frame/system/rpc/runtime-api" } +pallet-timestamp = { version = "3.0.0", default-features = false, path = "../../frame/timestamp" } +sp-finality-grandpa = { version = "3.0.0", default-features = false, path = "../../primitives/finality-grandpa" } +sp-trie = { version = "3.0.0", default-features = false, path = "../../primitives/trie" } +sp-transaction-pool = { version = "3.0.0", default-features = false, path = "../../primitives/transaction-pool" } +trie-db = { version = "0.22.2", default-features = false } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } +sc-service = { version = "0.9.0", default-features = false, optional = true, features = ["test-helpers"], 
path = "../../client/service" } +sp-state-machine = { version = "0.9.0", default-features = false, path = "../../primitives/state-machine" } +sp-externalities = { version = "0.9.0", default-features = false, path = "../../primitives/externalities" } # 3rd party -cfg-if = "0.1.10" +cfg-if = "1.0" log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } [dev-dependencies] -sc-block-builder = { version = "0.8.0", path = "../../client/block-builder" } -sc-executor = { version = "0.8.0", path = "../../client/executor" } +sc-block-builder = { version = "0.9.0", path = "../../client/block-builder" } +sc-executor = { version = "0.9.0", path = "../../client/executor" } substrate-test-runtime-client = { version = "2.0.0", path = "./client" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "4.0.0", path = "../../utils/wasm-builder" } [features] default = [ diff --git a/test-utils/runtime/build.rs b/test-utils/runtime/build.rs index 834551a7ba12d..1de18d32b08b0 100644 --- a/test-utils/runtime/build.rs +++ b/test-utils/runtime/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../utils/wasm-builder") .export_heap_base() // Note that we set the stack-size to 1MB explicitly even though it is set // to this value by default. 
This is because some of our tests (`restoration_of_globals`) diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index b310bbe7a709a..0c822f0cdff84 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -12,17 +12,17 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-light = { version = "2.0.0", path = "../../../client/light" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } +sc-light = { version = "3.0.0", path = "../../../client/light" } +sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +sc-block-builder = { version = "0.9.0", path = "../../../client/block-builder" } substrate-test-client = { version = "2.0.0", path = "../../client" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } substrate-test-runtime = { version = "2.0.0", path = "../../runtime" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -codec = { package = "parity-scale-codec", version = "1.3.1" } -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } -sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } -futures = "0.3.4" +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-client-api = { version = "3.0.0", path = "../../../client/api" } +sc-consensus = { version = "0.9.0", path = "../../../client/consensus/common" } +sc-service = { version = "0.9.0", default-features = false, path = "../../../client/service" } +futures = "0.3.9" diff --git a/test-utils/runtime/client/src/block_builder_ext.rs b/test-utils/runtime/client/src/block_builder_ext.rs index cc0bbc69e8fc1..9dc27c64143f4 100644 --- a/test-utils/runtime/client/src/block_builder_ext.rs +++ b/test-utils/runtime/client/src/block_builder_ext.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 9089be3ad4f43..5800203cf7e76 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/test-utils/runtime/client/src/trait_tests.rs b/test-utils/runtime/client/src/trait_tests.rs index b240a42a78555..5a8083065ec0d 100644 --- a/test-utils/runtime/client/src/trait_tests.rs +++ b/test-utils/runtime/client/src/trait_tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
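The `build.rs` hunk above switches from the `substrate-wasm-builder-runner` shim to depending on `substrate-wasm-builder` directly, dropping `with_wasm_builder_from_crates_or_path`. A minimal sketch of the resulting build script; the `import_memory()`/`build()` tail is the usual pattern and is an assumption here, since the hunk is truncated. Setting the `SKIP_WASM_BUILD` environment variable skips the wasm build entirely, as the runtime's `wasm_binary_unwrap` docs below note:

    use substrate_wasm_builder::WasmBuilder;

    fn main() {
        WasmBuilder::new()
            .with_current_project()
            .export_heap_base()
            .import_memory()
            .build()
    }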
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 126447d481848..63c4bab55ec49 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index e772a28ee33a2..b349d1266b031 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The Substrate runtime. This can be compiled with #[no_std], ready for Wasm. +//! The Substrate runtime. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -42,9 +42,11 @@ use sp_runtime::{ }, traits::{ BlindCheckable, BlakeTwo256, Block as BlockT, Extrinsic as ExtrinsicT, - GetNodeBlockType, GetRuntimeBlockType, NumberFor, Verify, IdentityLookup, + GetNodeBlockType, GetRuntimeBlockType, Verify, IdentityLookup, }, }; +#[cfg(feature = "std")] +use sp_runtime::traits::NumberFor; use sp_version::RuntimeVersion; pub use sp_core::hash::H256; #[cfg(any(feature = "std", test))] @@ -52,13 +54,14 @@ use sp_version::NativeVersion; use frame_support::{ impl_outer_origin, parameter_types, traits::KeyOwnerProofSystem, - weights::{RuntimeDbWeight, Weight}, + weights::RuntimeDbWeight, }; +use frame_system::limits::{BlockWeights, BlockLength}; use sp_inherents::{CheckInherentsResult, InherentData}; use cfg_if::cfg_if; // Ensure Babe and Aura use the same crypto to simplify things a bit. -pub use sp_consensus_babe::{AuthorityId, SlotNumber, AllowedSlots}; +pub use sp_consensus_babe::{AuthorityId, Slot, AllowedSlots}; pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; @@ -66,8 +69,8 @@ pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { WASM_BINARY.expect("Development wasm binary is not available. 
Testing is only \ supported with the flag disabled.") @@ -146,6 +149,8 @@ pub enum Extrinsic { IncludeData(Vec), StorageChange(Vec, Option>), ChangesTrieConfigUpdate(Option), + OffchainIndexSet(Vec, Vec), + OffchainIndexClear(Vec), } parity_util_mem::malloc_size_of_is_0!(Extrinsic); // non-opaque extrinsic does not need this @@ -174,6 +179,10 @@ impl BlindCheckable for Extrinsic { Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), Extrinsic::ChangesTrieConfigUpdate(new_config) => Ok(Extrinsic::ChangesTrieConfigUpdate(new_config)), + Extrinsic::OffchainIndexSet(key, value) => + Ok(Extrinsic::OffchainIndexSet(key, value)), + Extrinsic::OffchainIndexClear(key) => + Ok(Extrinsic::OffchainIndexClear(key)), } } } @@ -197,7 +206,7 @@ impl ExtrinsicT for Extrinsic { impl sp_runtime::traits::Dispatchable for Extrinsic { type Origin = Origin; - type Trait = (); + type Config = (); type Info = (); type PostInfo = (); fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { @@ -424,20 +433,54 @@ impl From> for Event { } } +impl frame_support::traits::PalletInfo for Runtime { + fn index() -> Option { + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::>() { + return Some(0) + } + if type_id == sp_std::any::TypeId::of::>() { + return Some(1) + } + if type_id == sp_std::any::TypeId::of::>() { + return Some(2) + } + + None + } + fn name() -> Option<&'static str> { + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::>() { + return Some("System") + } + if type_id == sp_std::any::TypeId::of::>() { + return Some("Timestamp") + } + if type_id == sp_std::any::TypeId::of::>() { + return Some("Babe") + } + + None + } +} + parameter_types! { pub const BlockHashCount: BlockNumber = 2400; pub const MinimumPeriod: u64 = 5; - pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 100, write: 1000, }; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub RuntimeBlockLength: BlockLength = + BlockLength::max(4 * 1024 * 1024); + pub RuntimeBlockWeights: BlockWeights = + BlockWeights::with_sensible_defaults(4 * 1024 * 1024, Perbill::from_percent(75)); } -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; type Origin = Origin; type Call = Extrinsic; type Index = u64; @@ -449,22 +492,17 @@ impl frame_system::Trait for Runtime { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = Self; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { /// A timestamp: milliseconds since the unix epoch. type Moment = u64; type OnTimestampSet = (); @@ -477,7 +515,7 @@ parameter_types! { pub const ExpectedBlockTime: u64 = 10_000; } -impl pallet_babe::Trait for Runtime { +impl pallet_babe::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; // there is no actual runtime in this test-runtime, so testing crates @@ -732,10 +770,18 @@ cfg_if! { } } - fn current_epoch_start() -> SlotNumber { + fn current_epoch_start() -> Slot { >::current_epoch_start() } + fn current_epoch() -> sp_consensus_babe::Epoch { + >::current_epoch() + } + + fn next_epoch() -> sp_consensus_babe::Epoch { + >::next_epoch() + } + fn submit_report_equivocation_unsigned_extrinsic( _equivocation_proof: sp_consensus_babe::EquivocationProof< ::Header, @@ -746,7 +792,7 @@ cfg_if! { } fn generate_key_ownership_proof( - _slot_number: sp_consensus_babe::SlotNumber, + _slot: sp_consensus_babe::Slot, _authority_id: sp_consensus_babe::AuthorityId, ) -> Option { None @@ -983,10 +1029,18 @@ cfg_if! { } } - fn current_epoch_start() -> SlotNumber { + fn current_epoch_start() -> Slot { >::current_epoch_start() } + fn current_epoch() -> sp_consensus_babe::Epoch { + >::current_epoch() + } + + fn next_epoch() -> sp_consensus_babe::Epoch { + >::next_epoch() + } + fn submit_report_equivocation_unsigned_extrinsic( _equivocation_proof: sp_consensus_babe::EquivocationProof< ::Header, @@ -997,7 +1051,7 @@ cfg_if! 
{ } fn generate_key_ownership_proof( - _slot_number: sp_consensus_babe::SlotNumber, + _slot: sp_consensus_babe::Slot, _authority_id: sp_consensus_babe::AuthorityId, ) -> Option { None @@ -1131,13 +1185,9 @@ fn test_witness(proof: StorageProof, root: crate::Hash) { root, ); let mut overlay = sp_state_machine::OverlayedChanges::default(); - #[cfg(feature = "std")] - let mut offchain_overlay = Default::default(); let mut cache = sp_state_machine::StorageTransactionCache::<_, _, BlockNumber>::default(); let mut ext = sp_state_machine::Ext::new( &mut overlay, - #[cfg(feature = "std")] - &mut offchain_overlay, &mut cache, &backend, #[cfg(feature = "std")] diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 818487a89e518..c379ec5de5ec8 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,7 +32,7 @@ use sp_runtime::{ }, }; use codec::{KeyedVec, Encode, Decode}; -use frame_system::Trait; +use frame_system::Config; use crate::{ AccountId, BlockNumber, Extrinsic, Transfer, H256 as Hash, Block, Header, Digest, AuthorityId }; @@ -42,11 +42,11 @@ const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin {} } decl_storage! { - trait Store for Module as TestRuntime { + trait Store for Module as TestRuntime { ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec; // The current block number being processed. Set by `execute_block`. 
Number get(fn number): Option; @@ -261,6 +261,14 @@ fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyEx execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::ChangesTrieConfigUpdate(ref new_config) => execute_changes_trie_config_update(new_config.clone()), + Extrinsic::OffchainIndexSet(key, value) => { + sp_io::offchain_index::set(&key, &value); + Ok(Ok(())) + }, + Extrinsic::OffchainIndexClear(key) => { + sp_io::offchain_index::clear(&key); + Ok(Ok(())) + } } } diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index a37477fdae58f..6e4e6524c3699 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -13,11 +13,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../client" } -parking_lot = "0.10.0" -codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -sc-transaction-graph = { version = "2.0.0", path = "../../../client/transaction-pool/graph" } +parking_lot = "0.11.1" +codec = { package = "parity-scale-codec", version = "2.0.0" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "3.0.0", path = "../../../primitives/transaction-pool" } +sc-transaction-graph = { version = "3.0.0", path = "../../../client/transaction-pool/graph" } futures = { version = "0.3.1", features = ["compat"] } derive_more = "0.99.2" diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index f772ba9b02d5c..bcba2fb6e6781 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/test-utils/src/lib.rs b/test-utils/src/lib.rs index 224eacd5129e3..a2e83fe7b0bf3 100644 --- a/test-utils/src/lib.rs +++ b/test-utils/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
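The new OffchainIndexSet and OffchainIndexClear extrinsics added to execute_transaction_backend above simply forward to sp_io::offchain_index::set and sp_io::offchain_index::clear, letting on-chain execution write key/value pairs into the node's local, non-consensus offchain database. Below is a minimal, self-contained sketch of that dispatch shape, with a HashMap standing in for the offchain index and hypothetical extrinsic variants (the real code calls the sp_io host functions and returns an ApplyExtrinsicResult):

use std::collections::HashMap;

// Hypothetical stand-ins for two of the test runtime's extrinsic variants and
// for the node's offchain index.
enum Extrinsic {
    OffchainIndexSet(Vec<u8>, Vec<u8>),
    OffchainIndexClear(Vec<u8>),
}

fn apply(index: &mut HashMap<Vec<u8>, Vec<u8>>, utx: Extrinsic) {
    match utx {
        Extrinsic::OffchainIndexSet(key, value) => {
            // In the runtime this arm calls `sp_io::offchain_index::set(&key, &value)`.
            index.insert(key, value);
        }
        Extrinsic::OffchainIndexClear(key) => {
            // In the runtime this arm calls `sp_io::offchain_index::clear(&key)`.
            index.remove(&key);
        }
    }
}

fn main() {
    let mut index = HashMap::new();
    apply(&mut index, Extrinsic::OffchainIndexSet(b"key".to_vec(), b"value".to_vec()));
    assert_eq!(index.get(&b"key".to_vec()), Some(&b"value".to_vec()));
    apply(&mut index, Extrinsic::OffchainIndexClear(b"key".to_vec()));
    assert!(index.is_empty());
}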
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/test-utils/test-crate/Cargo.toml b/test-utils/test-crate/Cargo.toml index 4e1273b25c993..846b14fe07744 100644 --- a/test-utils/test-crate/Cargo.toml +++ b/test-utils/test-crate/Cargo.toml @@ -13,5 +13,5 @@ targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] tokio = { version = "0.2.13", features = ["macros"] } -test-utils = { version = "2.0.0", path = "..", package = "substrate-test-utils" } -sc-service = { version = "0.8.0", path = "../../client/service" } +test-utils = { version = "3.0.0", path = "..", package = "substrate-test-utils" } +sc-service = { version = "0.9.0", path = "../../client/service" } diff --git a/test-utils/test-crate/src/main.rs b/test-utils/test-crate/src/main.rs index 209f29f76132d..2f04568591afe 100644 --- a/test-utils/test-crate/src/main.rs +++ b/test-utils/test-crate/src/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/test-utils/tests/basic.rs b/test-utils/tests/basic.rs index 3e96bfe83d3a7..3273d0386e8a4 100644 --- a/test-utils/tests/basic.rs +++ b/test-utils/tests/basic.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/test-utils/tests/ui.rs b/test-utils/tests/ui.rs index 1f3b466c7dd6e..13602f25572d7 100644 --- a/test-utils/tests/ui.rs +++ b/test-utils/tests/ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/test-utils/tests/ui/missing-func-parameter.rs b/test-utils/tests/ui/missing-func-parameter.rs index bd34a76902ef9..e08d8ae13100a 100644 --- a/test-utils/tests/ui/missing-func-parameter.rs +++ b/test-utils/tests/ui/missing-func-parameter.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/test-utils/tests/ui/too-many-func-parameters.rs b/test-utils/tests/ui/too-many-func-parameters.rs index 9aeadc2a88430..3b742fac7a603 100644 --- a/test-utils/tests/ui/too-many-func-parameters.rs +++ b/test-utils/tests/ui/too-many-func-parameters.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index 90668f4e51fe8..d7051620fcfd9 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-browser-utils" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Utilities for creating a browser light-client." edition = "2018" @@ -16,21 +16,22 @@ targets = ["x86_64-unknown-linux-gnu"] futures = { version = "0.3", features = ["compat"] } futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" -libp2p-wasm-ext = { version = "0.23", features = ["websocket"] } +libp2p-wasm-ext = { version = "0.27", features = ["websocket"] } console_error_panic_hook = "0.1.6" -console_log = "0.1.2" js-sys = "0.3.34" wasm-bindgen = "0.2.57" -wasm-bindgen-futures = "0.4.7" -kvdb-web = "0.7" -sp-database = { version = "2.0.0", path = "../../primitives/database" } -sc-informant = { version = "0.8.0", path = "../../client/informant" } -sc-service = { version = "0.8.0", path = "../../client/service", default-features = false } -sc-network = { path = "../../client/network", version = "0.8.0"} -sc-chain-spec = { path = "../../client/chain-spec", version = "2.0.0"} +wasm-bindgen-futures = "0.4.18" +kvdb-web = "0.9.0" +sp-database = { version = "3.0.0", path = "../../primitives/database" } +sc-informant = { version = "0.9.0", path = "../../client/informant" } +sc-service = { version = "0.9.0", path = "../../client/service", default-features = false } +sc-network = { path = "../../client/network", version = "0.9.0"} +sc-chain-spec = { path = "../../client/chain-spec", version = "3.0.0"} +sc-telemetry = { path = "../../client/telemetry", version = "3.0.0"} +sc-tracing = { path = "../../client/tracing", version = "3.0.0"} # Imported just for the `wasm-bindgen` feature -rand6 = { package = "rand", version = "0.6", features = ["wasm-bindgen"] } +getrandom = { version = "0.2", features = ["js"] } rand = { version = "0.7", features = ["wasm-bindgen"] } futures-timer = { version = "3.0.1", features = ["wasm-bindgen"]} chrono = { version = "0.4", features = ["wasmbind"] } diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index 95ec7ca19c9a4..ea9dfc9674f7f 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,8 +21,11 @@ use sc_network::config::TransportConfig; use sc_service::{ RpcSession, Role, Configuration, TaskManager, RpcHandlers, config::{DatabaseConfig, KeystoreConfig, NetworkConfiguration}, - GenericChainSpec, RuntimeGenesis + GenericChainSpec, RuntimeGenesis, + KeepBlocks, TransactionStorageMode, }; +use sc_telemetry::{TelemetryHandle, TelemetrySpan}; +use sc_tracing::logging::LoggerBuilder; use wasm_bindgen::prelude::*; use futures::{ prelude::*, channel::{oneshot, mpsc}, compat::*, future::{ready, ok, select} @@ -32,20 +35,31 @@ use sc_chain_spec::Extension; use libp2p_wasm_ext::{ExtTransport, ffi}; pub use console_error_panic_hook::set_once as set_console_error_panic_hook; -pub use console_log::init_with_level as init_console_log; + +/// Initialize the logger and return a `TelemetryWorker` and a wasm `ExtTransport`. +pub fn init_logging_and_telemetry( + pattern: &str, +) -> Result { + let transport = ExtTransport::new(ffi::websocket_transport()); + let mut logger = LoggerBuilder::new(pattern); + logger.with_transport(transport); + logger.init() +} /// Create a service configuration from a chain spec. /// /// This configuration contains good defaults for a browser light client. -pub async fn browser_configuration(chain_spec: GenericChainSpec) - -> Result> +pub async fn browser_configuration( + chain_spec: GenericChainSpec, + telemetry_handle: Option, +) -> Result> where G: RuntimeGenesis + 'static, E: Extension + 'static + Send + Sync, { let name = chain_spec.name().to_string(); - let transport = ExtTransport::new(ffi::websocket_transport()); + let mut network = NetworkConfiguration::new( format!("{} (Browser)", name), "unknown", @@ -58,6 +72,7 @@ where allow_private_ipv4: true, enable_mdns: false, }; + let telemetry_span = telemetry_handle.as_ref().map(|_| TelemetrySpan::new()); let config = Configuration { network, @@ -68,6 +83,8 @@ where async {} }).into(), telemetry_external_transport: Some(transport), + telemetry_handle, + telemetry_span, role: Role::Light, database: { info!("Opening Indexed DB database '{}'...", name); @@ -75,6 +92,7 @@ where DatabaseConfig::Custom(sp_database::as_database(db)) }, + keystore_remote: Default::default(), keystore: KeystoreConfig::InMemory, default_heap_pages: Default::default(), dev_key_seed: Default::default(), @@ -85,7 +103,9 @@ where impl_version: String::from("0.0.0"), offchain_worker: Default::default(), prometheus_config: Default::default(), - pruning: Default::default(), + state_pruning: Default::default(), + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, rpc_cors: Default::default(), rpc_http: Default::default(), rpc_ipc: Default::default(), @@ -105,6 +125,7 @@ where informant_output_format: sc_informant::OutputFormat { enable_color: false, }, + disable_log_reloading: false, }; Ok(config) diff --git a/utils/build-script-utils/Cargo.toml b/utils/build-script-utils/Cargo.toml index 30c8a4c52b657..fbef70db93bfd 100644 --- a/utils/build-script-utils/Cargo.toml +++ b/utils/build-script-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-build-script-utils" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,4 +13,4 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -platforms = "0.2.1" +platforms = "1.1" diff --git a/utils/build-script-utils/src/git.rs b/utils/build-script-utils/src/git.rs 
index 29c6a325fe7e9..d01343634bc94 100644 --- a/utils/build-script-utils/src/git.rs +++ b/utils/build-script-utils/src/git.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/utils/build-script-utils/src/lib.rs b/utils/build-script-utils/src/lib.rs index 512e6dcaefda7..8eb17a7de61fb 100644 --- a/utils/build-script-utils/src/lib.rs +++ b/utils/build-script-utils/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/utils/build-script-utils/src/version.rs b/utils/build-script-utils/src/version.rs index 103fd5b1d24ac..f92c637c78cca 100644 --- a/utils/build-script-utils/src/version.rs +++ b/utils/build-script-utils/src/version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/utils/fork-tree/Cargo.toml b/utils/fork-tree/Cargo.toml index 23662722a1f6c..11c269bc3cba8 100644 --- a/utils/fork-tree/Cargo.toml +++ b/utils/fork-tree/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fork-tree" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,4 +14,4 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index 1d01c53417649..d1ec67d37b954 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -144,7 +144,7 @@ impl ForkTree where for child in root_children { if is_first && (child.number == *number && child.hash == *hash || - child.number < *number && is_descendent_of(&child.hash, hash).unwrap_or(false)) + child.number < *number && is_descendent_of(&child.hash, hash)?) { root.children.push(child); // assuming that the tree is well formed only one child should pass this requirement @@ -229,7 +229,10 @@ impl ForkTree where number = n; data = d; }, - None => return Ok(false), + None => { + self.rebalance(); + return Ok(false); + }, } } @@ -251,7 +254,9 @@ impl ForkTree where } fn node_iter(&self) -> impl Iterator> { - ForkTreeIterator { stack: self.roots.iter().collect() } + // we need to reverse the order of roots to maintain the expected + // ordering since the iterator uses a stack to track state. + ForkTreeIterator { stack: self.roots.iter().rev().collect() } } /// Iterates the nodes in the tree in pre-order. @@ -410,15 +415,15 @@ impl ForkTree where // another fork not part of the tree). 
make sure to only keep roots that // are part of the finalized branch let mut changed = false; - self.roots.retain(|root| { - let retain = root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false); + let roots = std::mem::take(&mut self.roots); - if !retain { + for root in roots { + if root.number > number && is_descendent_of(hash, &root.hash)? { + self.roots.push(root); + } else { changed = true; } - - retain - }); + } self.best_finalized_number = Some(number); @@ -462,16 +467,19 @@ impl ForkTree where let (is_finalized, is_descendant, is_ancestor) = { let root = &self.roots[idx]; let is_finalized = root.hash == *hash; - let is_descendant = !is_finalized - && root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false); - let is_ancestor = !is_finalized && !is_descendant - && root.number < number && is_descendent_of(&root.hash, hash).unwrap_or(false); + let is_descendant = + !is_finalized && root.number > number && is_descendent_of(hash, &root.hash)?; + let is_ancestor = !is_finalized + && !is_descendant && root.number < number + && is_descendent_of(&root.hash, hash)?; (is_finalized, is_descendant, is_ancestor) }; // if we have met finalized root - open it and return if is_finalized { - return Ok(FinalizationResult::Changed(Some(self.finalize_root_at(idx)))); + return Ok(FinalizationResult::Changed(Some( + self.finalize_root_at(idx), + ))); } // if node is descendant of finalized block - just leave it as is @@ -605,18 +613,19 @@ impl ForkTree where // descendent (in this case the node wasn't finalized earlier presumably // because the predicate didn't pass). let mut changed = false; - self.roots.retain(|root| { - let retain = - root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false) || - root.number == number && root.hash == *hash || - is_descendent_of(&root.hash, hash).unwrap_or(false); + let roots = std::mem::take(&mut self.roots); - if !retain { + for root in roots { + let retain = root.number > number && is_descendent_of(hash, &root.hash)? + || root.number == number && root.hash == *hash + || is_descendent_of(&root.hash, hash)?; + + if retain { + self.roots.push(root); + } else { changed = true; } - - retain - }); + } self.best_finalized_number = Some(number); @@ -898,8 +907,7 @@ impl Iterator for RemovedIterator { // child nodes are stored ordered by max branch height (decreasing), // we want to keep this ordering while iterating but since we're // using a stack for iterator state we need to reverse it. - let mut children = Vec::new(); - std::mem::swap(&mut children, &mut node.children); + let children = std::mem::take(&mut node.children); self.stack.extend(children.into_iter().rev()); (node.hash, node.number, node.data) @@ -939,6 +947,10 @@ mod test { // — J - K // // (where N is not a part of fork tree) + // + // NOTE: the tree will get automatically rebalance on import and won't be laid out like the + // diagram above. the children will be ordered by subtree depth and the longest branches + // will be on the leftmost side of the tree. 
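Several ForkTree methods (finalize, finalize_with_descendent, and the root filtering above) previously used Vec::retain with is_descendent_of(..).unwrap_or(false), which silently swallowed lookup errors. The patched code takes the roots vector with std::mem::take and rebuilds it in a loop so errors can be propagated with ?. A minimal, generic sketch of that "fallible retain" pattern (names are illustrative, not the actual ForkTree API):

// Generic "fallible retain": keep elements for which `pred` returns Ok(true),
// drop those returning Ok(false), and abort early on Err, propagating it.
fn try_retain<T, E>(
    items: &mut Vec<T>,
    mut pred: impl FnMut(&T) -> Result<bool, E>,
) -> Result<bool, E> {
    let mut changed = false;
    // Take ownership of the vector so the kept elements can be pushed back.
    let drained = std::mem::take(items);
    for item in drained {
        if pred(&item)? {
            items.push(item);
        } else {
            changed = true;
        }
    }
    Ok(changed)
}

fn main() {
    let mut numbers = vec![1, 2, 3, 4];
    let changed = try_retain(&mut numbers, |n| Ok::<bool, ()>(n % 2 == 0)).unwrap();
    assert!(changed);
    assert_eq!(numbers, vec![2, 4]);
}

As in the patched loops, an early error leaves only the elements processed so far in the vector; the error is simply returned to the caller instead of being masked as "not a descendent".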
let is_descendent_of = |base: &&str, block: &&str| -> Result { let letters = vec!["B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "O"]; match (*base, *block) { @@ -1132,7 +1144,7 @@ mod test { assert_eq!( tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("I", 4), ("L", 4)], + vec![("L", 4), ("I", 4)], ); // finalizing a node from another fork that isn't part of the tree clears the tree @@ -1180,7 +1192,7 @@ mod test { assert_eq!( tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("I", 4), ("L", 4)], + vec![("L", 4), ("I", 4)], ); assert_eq!( @@ -1212,7 +1224,7 @@ mod test { #[test] fn finalize_with_descendent_works() { #[derive(Debug, PartialEq)] - struct Change { effective: u64 }; + struct Change { effective: u64 } let (mut tree, is_descendent_of) = { let mut tree = ForkTree::new(); @@ -1354,11 +1366,11 @@ mod test { vec![ ("A", 1), ("B", 2), ("C", 3), ("D", 4), ("E", 5), - ("F", 2), + ("F", 2), ("H", 3), ("L", 4), ("M", 5), + ("O", 5), + ("I", 4), ("G", 3), - ("H", 3), ("I", 4), - ("L", 4), ("M", 5), ("O", 5), - ("J", 2), ("K", 3) + ("J", 2), ("K", 3), ], ); } @@ -1480,7 +1492,7 @@ mod test { assert_eq!( removed.map(|(hash, _, _)| hash).collect::>(), - vec!["A", "F", "G", "H", "I", "L", "M", "O", "J", "K"] + vec!["A", "F", "H", "L", "M", "O", "I", "G", "J", "K"] ); let removed = tree.prune( @@ -1545,19 +1557,30 @@ mod test { fn tree_rebalance() { let (mut tree, _) = test_fork_tree(); + // the tree is automatically rebalanced on import, therefore we should iterate in preorder + // exploring the longest forks first. check the ascii art above to understand the expected + // output below. assert_eq!( tree.iter().map(|(h, _, _)| *h).collect::>(), - vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "L", "M", "O", "J", "K"], + vec!["A", "B", "C", "D", "E", "F", "H", "L", "M", "O", "I", "G", "J", "K"], ); - // after rebalancing the tree we should iterate in preorder exploring - // the longest forks first. check the ascii art above to understand the - // expected output below. - tree.rebalance(); + // let's add a block "P" which is a descendent of block "O" + let is_descendent_of = |base: &&str, block: &&str| -> Result { + match (*base, *block) { + (b, "P") => Ok(vec!["A", "F", "L", "O"].into_iter().any(|n| n == b)), + _ => Ok(false), + } + }; + + tree.import("P", 6, (), &is_descendent_of).unwrap(); + // this should re-order the tree, since the branch "A -> B -> C -> D -> E" is no longer tied + // with 5 blocks depth. additionally "O" should be visited before "M" now, since it has one + // descendent "P" which makes that branch 6 blocks long. 
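The reordered expectations in these tests follow from two changes above: node_iter and RemovedIterator now push children onto their stack in reverse, and import keeps children ordered by maximum branch depth, so pre-order traversal explores the longest fork first. The reversal is needed because a stack yields the most recently pushed element first; a small self-contained sketch of that detail:

// Minimal sketch of a stack-based pre-order traversal. Children are stored
// leftmost-first; pushing them reversed makes the leftmost child the next
// node popped, which preserves pre-order.
struct Node {
    value: &'static str,
    children: Vec<Node>,
}

fn preorder(root: &Node) -> Vec<&'static str> {
    let mut out = Vec::new();
    let mut stack = vec![root];
    while let Some(node) = stack.pop() {
        out.push(node.value);
        stack.extend(node.children.iter().rev());
    }
    out
}

fn main() {
    let tree = Node {
        value: "A",
        children: vec![
            Node {
                value: "B",
                children: vec![Node { value: "C", children: vec![] }],
            },
            Node { value: "D", children: vec![] },
        ],
    };
    // Without the `.rev()` above this would visit "D" before "B".
    assert_eq!(preorder(&tree), vec!["A", "B", "C", "D"]);
}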
assert_eq!( tree.iter().map(|(h, _, _)| *h).collect::>(), - ["A", "B", "C", "D", "E", "F", "H", "L", "M", "O", "I", "G", "J", "K"] + ["A", "F", "H", "L", "O", "P", "M", "I", "G", "B", "C", "D", "E", "J", "K"] ); } } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 4ee2454e708e9..c810bd4d57d79 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-benchmarking-cli" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,21 +13,22 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sc-client-db = { version = "0.8.0", path = "../../../client/db" } -sc-executor = { version = "0.8.0", path = "../../../client/executor" } -sp-externalities = { version = "0.8.0", path = "../../../primitives/externalities" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } +frame-benchmarking = { version = "3.0.0", path = "../../../frame/benchmarking" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sc-service = { version = "0.9.0", default-features = false, path = "../../../client/service" } +sc-cli = { version = "0.9.0", path = "../../../client/cli" } +sc-client-db = { version = "0.9.0", path = "../../../client/db" } +sc-executor = { version = "0.9.0", path = "../../../client/executor" } +sp-externalities = { version = "0.9.0", path = "../../../primitives/externalities" } +sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.9.0", path = "../../../primitives/state-machine" } +codec = { version = "2.0.0", package = "parity-scale-codec" } structopt = "0.3.8" -codec = { version = "1.3.1", package = "parity-scale-codec" } chrono = "0.4" serde = "1.0.116" handlebars = "3.5.0" +Inflector = "0.11.4" [features] default = ["db"] diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 00a2e7bd7f942..da8a1d98f09a5 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -62,7 +62,6 @@ impl BenchmarkCmd { let genesis_storage = spec.build_storage()?; let mut changes = Default::default(); - let mut offchain_changes = Default::default(); let cache_size = Some(self.database_cache_size as usize); let state = BenchmarkingState::::new(genesis_storage, cache_size)?; let executor = NativeExecutor::::new( @@ -80,7 +79,6 @@ impl BenchmarkCmd { &state, None, &mut changes, - &mut offchain_changes, &executor, "Benchmark_dispatch_benchmark", &( @@ -174,7 +172,7 @@ impl BenchmarkCmd { } } }, - Err(error) => eprintln!("Error: {:?}", error), + Err(error) => eprintln!("Error: {}", error), } Ok(()) diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index b89bceeb953c3..ba1a52aa3644e 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/utils/frame/benchmarking-cli/src/template.hbs b/utils/frame/benchmarking-cli/src/template.hbs index 7f7e2d6dcb995..0ff6144214d61 100644 --- a/utils/frame/benchmarking-cli/src/template.hbs +++ b/utils/frame/benchmarking-cli/src/template.hbs @@ -1,5 +1,6 @@ {{header}} -//! Weights for {{pallet}} +//! Autogenerated weights for {{pallet}} +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}}, LOW RANGE: {{cmd.lowest_range_values}}, HIGH RANGE: {{cmd.highest_range_values}} //! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} @@ -17,7 +18,7 @@ use sp_std::marker::PhantomData; /// Weight functions for {{pallet}}. pub struct WeightInfo(PhantomData); -impl {{pallet}}::WeightInfo for WeightInfo { +impl {{pallet}}::WeightInfo for WeightInfo { {{~#each benchmarks as |benchmark|}} fn {{benchmark.name~}} ( @@ -26,6 +27,7 @@ impl {{pallet}}::WeightInfo for WeightInfo { ) -> Weight { ({{underscore benchmark.base_weight}} as Weight) {{~#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) {{~/each}} {{~#if (ne benchmark.base_reads "0")}} diff --git a/utils/frame/benchmarking-cli/src/writer.rs b/utils/frame/benchmarking-cli/src/writer.rs index 61423000231d0..bde0be25d0368 100644 --- a/utils/frame/benchmarking-cli/src/writer.rs +++ b/utils/frame/benchmarking-cli/src/writer.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
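The template change above adds a per-component "Standard Error" comment to the generated weight functions, and the writer changes that follow populate it: each slope from the linear regression is zipped with the model's standard error for that regressor, falling back to zero when no fitted model is available (see extract_errors below). A simplified, self-contained sketch of that zipping pattern, with made-up numbers rather than the actual Analysis/RegressionModel types:

// Pair each regression slope with its standard error when a fitted model is
// available, defaulting to zero otherwise.
fn extract_errors(errors: Option<Vec<u128>>) -> impl Iterator<Item = u128> {
    let mut iter = errors.map(|e| e.into_iter());
    std::iter::from_fn(move || match &mut iter {
        Some(inner) => inner.next(), // real standard errors
        None => Some(0),             // no model: report zero error
    })
}

fn main() {
    let slopes = vec![12u128, 7, 0];
    // With a fitted model the errors line up one-to-one with the slopes.
    let with_model: Vec<(u128, u128)> =
        slopes.iter().copied().zip(extract_errors(Some(vec![1, 2, 3]))).collect();
    assert_eq!(with_model, vec![(12, 1), (7, 2), (0, 3)]);
    // Without a model every component gets a zero standard error.
    let without: Vec<(u128, u128)> =
        slopes.iter().copied().zip(extract_errors(None)).collect();
    assert_eq!(without, vec![(12, 0), (7, 0), (0, 0)]);
}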
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,9 +22,10 @@ use std::fs; use std::path::PathBuf; use serde::Serialize; +use inflector::Inflector; use crate::BenchmarkCmd; -use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis}; +use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis, RegressionModel}; use sp_runtime::traits::Zero; const VERSION: &'static str = env!("CARGO_PKG_VERSION"); @@ -37,6 +38,7 @@ struct TemplateData { date: String, version: String, pallet: String, + instance: String, header: String, cmd: CmdData, benchmarks: Vec, @@ -84,6 +86,8 @@ struct ComponentSlope { name: String, #[serde(serialize_with = "string_serialize")] slope: u128, + #[serde(serialize_with = "string_serialize")] + error: u128, } // Small helper to create an `io::Error` from a string. @@ -100,7 +104,7 @@ fn io_error(s: &str) -> std::io::Error { // p1 -> [b1, b2, b3] // p2 -> [b1, b2] // ``` -fn map_results(batches: &[BenchmarkBatch]) -> Result>, std::io::Error> { +fn map_results(batches: &[BenchmarkBatch]) -> Result>, std::io::Error> { // Skip if batches is empty. if batches.is_empty() { return Err(io_error("empty batches")) } @@ -113,6 +117,7 @@ fn map_results(batches: &[BenchmarkBatch]) -> Result Result) -> impl Iterator + '_ { + let mut errors = model.as_ref().map(|m| m.se.regressor_values.iter()); + std::iter::from_fn(move || { + match &mut errors { + Some(model) => model.next().map(|val| *val as u128), + _ => Some(0), + } + }) +} + // Analyze and return the relevant results for a given benchmark. fn get_benchmark_data(batch: &BenchmarkBatch) -> BenchmarkData { // Analyze benchmarks to get the linear regression. @@ -145,27 +162,45 @@ fn get_benchmark_data(batch: &BenchmarkBatch) -> BenchmarkData { let mut used_reads = Vec::new(); let mut used_writes = Vec::new(); - extrinsic_time.slopes.into_iter().zip(extrinsic_time.names.iter()).for_each(|(slope, name)| { - if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } - used_extrinsic_time.push(ComponentSlope { - name: name.clone(), - slope: slope.saturating_mul(1000), - }); - } - }); - reads.slopes.into_iter().zip(reads.names.iter()).for_each(|(slope, name)| { - if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } - used_reads.push(ComponentSlope { name: name.clone(), slope }); - } - }); - writes.slopes.into_iter().zip(writes.names.iter()).for_each(|(slope, name)| { - if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } - used_writes.push(ComponentSlope { name: name.clone(), slope }); - } - }); + extrinsic_time.slopes.into_iter() + .zip(extrinsic_time.names.iter()) + .zip(extract_errors(&extrinsic_time.model)) + .for_each(|((slope, name), error)| { + if !slope.is_zero() { + if !used_components.contains(&name) { used_components.push(name); } + used_extrinsic_time.push(ComponentSlope { + name: name.clone(), + slope: slope.saturating_mul(1000), + error: error.saturating_mul(1000), + }); + } + }); + reads.slopes.into_iter() + .zip(reads.names.iter()) + .zip(extract_errors(&reads.model)) + .for_each(|((slope, name), error)| { + if !slope.is_zero() { + if !used_components.contains(&name) { used_components.push(name); } + used_reads.push(ComponentSlope { + name: name.clone(), + slope, + error, + }); + } + }); + writes.slopes.into_iter() + .zip(writes.names.iter()) + .zip(extract_errors(&writes.model)) + .for_each(|((slope, name), error)| { + if 
!slope.is_zero() { + if !used_components.contains(&name) { used_components.push(name); } + used_writes.push(ComponentSlope { + name: name.clone(), + slope, + error, + }); + } + }); // This puts a marker on any component which is entirely unused in the weight formula. let components = batch.results[0].components @@ -241,12 +276,18 @@ pub fn write_results( // Organize results by pallet into a JSON map let all_results = map_results(batches)?; - for (pallet, results) in all_results.into_iter() { + for ((pallet, instance), results) in all_results.iter() { let mut file_path = path.clone(); // If a user only specified a directory... if file_path.is_dir() { - // Create new file: "path/to/pallet_name.rs". - file_path.push(&pallet); + // Check if there might be multiple instances benchmarked. + if all_results.keys().any(|(p, i)| p == pallet && i != instance) { + // Create new file: "path/to/pallet_name_instance_name.rs". + file_path.push(pallet.clone() + "_" + &instance.to_snake_case()); + } else { + // Create new file: "path/to/pallet_name.rs". + file_path.push(pallet.clone()); + } file_path.set_extension("rs"); } @@ -254,10 +295,11 @@ pub fn write_results( args: args.clone(), date: date.clone(), version: VERSION.to_string(), - pallet: pallet, + pallet: pallet.to_string(), + instance: instance.to_string(), header: header_text.clone(), cmd: cmd_data.clone(), - benchmarks: results, + benchmarks: results.clone(), }; let mut output_file = fs::File::create(file_path)?; @@ -362,6 +404,7 @@ mod test { return BenchmarkBatch { pallet: [pallet.to_vec(), b"_pallet".to_vec()].concat(), + instance: b"instance".to_vec(), benchmark: [benchmark.to_vec(), b"_benchmark".to_vec()].concat(), results, } @@ -379,18 +422,30 @@ mod test { assert_eq!(benchmark.base_weight, base * 1_000); assert_eq!( benchmark.component_weight, - vec![ComponentSlope { name: component.to_string(), slope: slope * 1_000 }] + vec![ComponentSlope { + name: component.to_string(), + slope: slope * 1_000, + error: 0, + }] ); // DB Reads/Writes are untouched assert_eq!(benchmark.base_reads, base); assert_eq!( benchmark.component_reads, - vec![ComponentSlope { name: component.to_string(), slope: slope }] + vec![ComponentSlope { + name: component.to_string(), + slope, + error: 0, + }] ); assert_eq!(benchmark.base_writes, base); assert_eq!( benchmark.component_writes, - vec![ComponentSlope { name: component.to_string(), slope: slope }] + vec![ComponentSlope { + name: component.to_string(), + slope, + error: 0, + }] ); } @@ -402,15 +457,21 @@ mod test { test_data(b"second", b"first", BenchmarkParameter::c, 3, 4), ]).unwrap(); - let first_benchmark = &mapped_results.get("first_pallet").unwrap()[0]; + let first_benchmark = &mapped_results.get( + &("first_pallet".to_string(), "instance".to_string()) + ).unwrap()[0]; assert_eq!(first_benchmark.name, "first_benchmark"); check_data(first_benchmark, "a", 10, 3); - let second_benchmark = &mapped_results.get("first_pallet").unwrap()[1]; + let second_benchmark = &mapped_results.get( + &("first_pallet".to_string(), "instance".to_string()) + ).unwrap()[1]; assert_eq!(second_benchmark.name, "second_benchmark"); check_data(second_benchmark, "b", 9, 2); - let second_pallet_benchmark = &mapped_results.get("second_pallet").unwrap()[0]; + let second_pallet_benchmark = &mapped_results.get( + &("second_pallet".to_string(), "instance".to_string()) + ).unwrap()[0]; assert_eq!(second_pallet_benchmark.name, "first_benchmark"); check_data(second_pallet_benchmark, "c", 3, 4); } diff --git 
a/utils/frame/frame-utilities-cli/Cargo.toml b/utils/frame/frame-utilities-cli/Cargo.toml index 0e39f35512541..cb37119edf0b2 100644 --- a/utils/frame/frame-utilities-cli/Cargo.toml +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-cli" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -11,11 +11,11 @@ documentation = "https://docs.rs/substrate-frame-cli" readme = "README.md" [dependencies] -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } +sc-cli = { version = "0.9.0", path = "../../../client/cli" } +sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } structopt = "0.3.8" -frame-system = { version = "2.0.0", path = "../../../frame/system" } +frame-system = { version = "3.0.0", path = "../../../frame/system" } [dev-dependencies] diff --git a/utils/frame/frame-utilities-cli/src/lib.rs b/utils/frame/frame-utilities-cli/src/lib.rs index 872cfc99a63dc..2d6bf4ab9d8f1 100644 --- a/utils/frame/frame-utilities-cli/src/lib.rs +++ b/utils/frame/frame-utilities-cli/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/utils/frame/frame-utilities-cli/src/module_id.rs b/utils/frame/frame-utilities-cli/src/module_id.rs index cc76c70d0fa8e..187c2de1dd6d5 100644 --- a/utils/frame/frame-utilities-cli/src/module_id.rs +++ b/utils/frame/frame-utilities-cli/src/module_id.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -64,7 +64,7 @@ impl ModuleIdCmd { /// runs the command pub fn run(&self) -> Result<(), Error> where - R: frame_system::Trait, + R: frame_system::Config, R::AccountId: Ss58Codec, { if self.id.len() != 8 { diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 2541ed0cf655f..ca3705b499a22 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-rpc-support" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies ", "Andrew Dirksen "] edition = "2018" license = "Apache-2.0" @@ -13,14 +13,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = { version = "0.3.0", features = ["compat"] } -jsonrpc-client-transports = { version = "15.0.0", default-features = false, features = ["http"] } -jsonrpc-core = "15.0.0" -codec = { package = "parity-scale-codec", version = "1.3.1" } +jsonrpc-client-transports = { version = "15.1.0", default-features = false, features = ["http"] } +jsonrpc-core = "15.1.0" +codec = { package = "parity-scale-codec", version = "2.0.0" } serde = "1" -frame-support = { version = "2.0.0", path = "../../../../frame/support" } -sp-storage = { version = "2.0.0", path = "../../../../primitives/storage" } -sc-rpc-api = { version = "0.8.0", path = "../../../../client/rpc-api" } +frame-support = { version = "3.0.0", path = "../../../../frame/support" } +sp-storage = { version = "3.0.0", path = "../../../../primitives/storage" } +sc-rpc-api = { version = "0.9.0", path = "../../../../client/rpc-api" } [dev-dependencies] -frame-system = { version = "2.0.0", path = "../../../../frame/system" } +frame-system = { version = "3.0.0", path = "../../../../frame/system" } tokio = "0.2" diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index dc87d6185209d..417f2bfc22ac8 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -40,11 +40,11 @@ use sc_rpc_api::state::StateClient; /// # use codec::Encode; /// # use frame_support::{decl_storage, decl_module}; /// # use substrate_frame_rpc_support::StorageQuery; -/// # use frame_system::Trait; +/// # use frame_system::Config; /// # use sc_rpc_api::state::StateClient; /// # -/// # // Hash would normally be ::Hash, but we don't have -/// # // frame_system::Trait implemented for TestRuntime. Here we just pretend. +/// # // Hash would normally be ::Hash, but we don't have +/// # // frame_system::Config implemented for TestRuntime. Here we just pretend. /// # type Hash = (); /// # /// # fn main() -> Result<(), RpcError> { @@ -54,7 +54,7 @@ use sc_rpc_api::state::StateClient; /// # struct TestRuntime; /// # /// # decl_module! { -/// # pub struct Module for enum Call where origin: T::Origin {} +/// # pub struct Module for enum Call where origin: T::Origin {} /// # } /// # /// pub type Loc = (i64, i64, i64); @@ -62,7 +62,7 @@ use sc_rpc_api::state::StateClient; /// /// // Note that all fields are marked pub. /// decl_storage! 
{ -/// trait Store for Module as TestRuntime { +/// trait Store for Module as TestRuntime { /// pub LastActionId: u64; /// pub Voxels: map hasher(blake2_128_concat) Loc => Block; /// pub Actions: map hasher(blake2_128_concat) u64 => Loc; @@ -125,7 +125,7 @@ impl StorageQuery { /// Send this query over RPC, await the typed result. /// - /// Hash should be ::Hash. + /// Hash should be ::Hash. /// /// # Arguments /// diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 515ff93251522..ea8d97a82ad34 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-rpc-system" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,24 +13,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0", path = "../../../../client/api" } -codec = { package = "parity-scale-codec", version = "1.3.1" } +sc-client-api = { version = "3.0.0", path = "../../../../client/api" } +codec = { package = "parity-scale-codec", version = "2.0.0" } futures = { version = "0.3.4", features = ["compat"] } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" +jsonrpc-core = "15.1.0" +jsonrpc-core-client = "15.1.0" +jsonrpc-derive = "15.1.0" log = "0.4.8" serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0", path = "../../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../../primitives/api" } -frame-system-rpc-runtime-api = { version = "2.0.0", path = "../../../../frame/system/rpc/runtime-api" } -sp-core = { version = "2.0.0", path = "../../../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../../../primitives/blockchain" } -sp-transaction-pool = { version = "2.0.0", path = "../../../../primitives/transaction-pool" } -sp-block-builder = { version = "2.0.0", path = "../../../../primitives/block-builder" } -sc-rpc-api = { version = "0.8.0", path = "../../../../client/rpc-api" } +sp-runtime = { version = "3.0.0", path = "../../../../primitives/runtime" } +sp-api = { version = "3.0.0", path = "../../../../primitives/api" } +frame-system-rpc-runtime-api = { version = "3.0.0", path = "../../../../frame/system/rpc/runtime-api" } +sp-core = { version = "3.0.0", path = "../../../../primitives/core" } +sp-blockchain = { version = "3.0.0", path = "../../../../primitives/blockchain" } +sp-transaction-pool = { version = "3.0.0", path = "../../../../primitives/transaction-pool" } +sp-block-builder = { version = "3.0.0", path = "../../../../primitives/block-builder" } +sc-rpc-api = { version = "0.9.0", path = "../../../../client/rpc-api" } [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } -sp-tracing = { version = "2.0.0", path = "../../../../primitives/tracing" } -sc-transaction-pool = { version = "2.0.0", path = "../../../../client/transaction-pool" } +sp-tracing = { version = "3.0.0", path = "../../../../primitives/tracing" } +sc-transaction-pool = { version = "3.0.0", path = "../../../../client/transaction-pool" } diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index cefe39534a167..57c0cda9cca3a 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -301,6 +301,7 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), + true.into(), None, spawner, client.clone(), @@ -340,6 +341,7 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), + true.into(), None, spawner, client.clone(), @@ -363,6 +365,7 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), + true.into(), None, spawner, client.clone(), @@ -395,6 +398,7 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), + true.into(), None, spawner, client.clone(), diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index 9eed7a2fdcfcd..a7f90e831620b 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Endpoint to expose Prometheus metrics" name = "substrate-prometheus-endpoint" -version = "0.8.0" +version = "0.9.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -14,11 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -prometheus = { version = "0.10.0", default-features = false } +prometheus = { version = "0.11.0", default-features = false } futures-util = { version = "0.3.1", default-features = false, features = ["io"] } derive_more = "0.99" [target.'cfg(not(target_os = "unknown"))'.dependencies] async-std = { version = "1.6.5", features = ["unstable"] } -hyper = { version = "0.13.1", default-features = false, features = ["stream"] } +hyper = { version = "0.13.9", default-features = false, features = ["stream"] } tokio = "0.2" diff --git a/utils/prometheus/src/lib.rs b/utils/prometheus/src/lib.rs index be7050a8a0736..d7cdfcd0443bf 100644 --- a/utils/prometheus/src/lib.rs +++ b/utils/prometheus/src/lib.rs @@ -1,18 +1,19 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
use futures_util::{FutureExt, future::Future}; pub use prometheus::{ @@ -33,7 +34,7 @@ use std::net::SocketAddr; mod networking; mod sourced; -pub use sourced::{SourcedCounter, SourcedGauge, MetricSource}; +pub use sourced::{SourcedCounter, SourcedGauge, MetricSource, SourcedMetric}; #[cfg(target_os = "unknown")] pub use unknown_os::init_prometheus; diff --git a/utils/prometheus/src/networking.rs b/utils/prometheus/src/networking.rs index 92b9fedf6c79a..48ae8a23297c9 100644 --- a/utils/prometheus/src/networking.rs +++ b/utils/prometheus/src/networking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/utils/prometheus/src/sourced.rs b/utils/prometheus/src/sourced.rs index 58f60e4969bb8..014bdb30f8ab7 100644 --- a/utils/prometheus/src/sourced.rs +++ b/utils/prometheus/src/sourced.rs @@ -1,18 +1,19 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Metrics that are collected from existing sources. diff --git a/utils/wasm-builder-runner/Cargo.toml b/utils/wasm-builder-runner/Cargo.toml deleted file mode 100644 index 2c54a5ec3a4d6..0000000000000 --- a/utils/wasm-builder-runner/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "substrate-wasm-builder-runner" -version = "2.0.0" -authors = ["Parity Technologies "] -description = "Runner for substrate-wasm-builder" -edition = "2018" -readme = "README.md" -repository = "https://github.com/paritytech/substrate/" -license = "Apache-2.0" -homepage = "https://substrate.dev" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] diff --git a/utils/wasm-builder-runner/README.md b/utils/wasm-builder-runner/README.md deleted file mode 100644 index 1b9e2b08ca444..0000000000000 --- a/utils/wasm-builder-runner/README.md +++ /dev/null @@ -1,12 +0,0 @@ -## WASM builder runner - -Since cargo contains many bugs when it comes to correct dependency and feature -resolution, we need this little tool. See for -more information. 
- -It will create a project that will call `substrate-wasm-builder` to prevent any dependencies -from `substrate-wasm-builder` influencing the main project's dependencies. - -For more information see - -License: GPL-3.0 diff --git a/utils/wasm-builder-runner/src/lib.rs b/utils/wasm-builder-runner/src/lib.rs deleted file mode 100644 index 04e06495c69b4..0000000000000 --- a/utils/wasm-builder-runner/src/lib.rs +++ /dev/null @@ -1,498 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # WASM builder runner -//! -//! Since cargo contains many bugs when it comes to correct dependency and feature -//! resolution, we need this little tool. See for -//! more information. -//! -//! It will create a project that will call `substrate-wasm-builder` to prevent any dependencies -//! from `substrate-wasm-builder` influencing the main project's dependencies. -//! -//! For more information see - -use std::{ - env, process::{Command, self}, fs, path::{PathBuf, Path}, hash::{Hash, Hasher}, - collections::hash_map::DefaultHasher, -}; - -/// Environment variable that tells us to skip building the WASM binary. -const SKIP_BUILD_ENV: &str = "SKIP_WASM_BUILD"; - -/// Environment variable that tells us to create a dummy WASM binary. -/// -/// This is useful for `cargo check` to speed-up the compilation. -/// -/// # Caution -/// -/// Enabling this option will just provide `&[]` as WASM binary. -const DUMMY_WASM_BINARY_ENV: &str = "BUILD_DUMMY_WASM_BINARY"; - -/// Environment variable that makes sure the WASM build is triggered. -const FORCE_WASM_BUILD_ENV: &str = "FORCE_WASM_BUILD"; - -/// Replace all backslashes with slashes. -fn replace_back_slashes(path: T) -> String { - path.to_string().replace("\\", "/") -} - -/// Returns the manifest dir from the `CARGO_MANIFEST_DIR` env. -fn get_manifest_dir() -> PathBuf { - env::var("CARGO_MANIFEST_DIR") - .expect("`CARGO_MANIFEST_DIR` is always set for `build.rs` files; qed") - .into() -} - -/// First step of the [`WasmBuilder`] to select the project to build. -pub struct WasmBuilderSelectProject { - /// This parameter just exists to make it impossible to construct - /// this type outside of this crate. - _ignore: (), -} - -impl WasmBuilderSelectProject { - /// Use the current project as project for building the WASM binary. - /// - /// # Panics - /// - /// Panics if the `CARGO_MANIFEST_DIR` variable is not set. This variable - /// is always set by `Cargo` in `build.rs` files. - pub fn with_current_project(self) -> WasmBuilderSelectSource { - WasmBuilderSelectSource(get_manifest_dir().join("Cargo.toml")) - } - - /// Use the given `path` as project for building the WASM binary. - /// - /// Returns an error if the given `path` does not points to a `Cargo.toml`. 
- pub fn with_project( - self, - path: impl Into, - ) -> Result { - let path = path.into(); - - if path.ends_with("Cargo.toml") { - Ok(WasmBuilderSelectSource(path)) - } else { - Err("Project path must point to the `Cargo.toml` of the project") - } - } -} - -/// Second step of the [`WasmBuilder`] to set the source of the `wasm-builder`. -pub struct WasmBuilderSelectSource(PathBuf); - -impl WasmBuilderSelectSource { - /// Use the given `path` as source for `wasm-builder`. - /// - /// The `path` must be relative and point to the directory that contains the `Cargo.toml` for - /// `wasm-builder`. - pub fn with_wasm_builder_from_path(self, path: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Path(path), - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `repo` and `rev` as source for `wasm-builder`. - pub fn with_wasm_builder_from_git(self, repo: &'static str, rev: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Git { repo, rev }, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `version` to fetch `wasm-builder` source from crates.io. - pub fn with_wasm_builder_from_crates(self, version: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Crates(version), - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `version` to fetch `wasm-builder` source from crates.io or use - /// the given `path` as source. - /// - /// The `path` must be relative and point to the directory that contains the `Cargo.toml` for - /// `wasm-builder`. - pub fn with_wasm_builder_from_crates_or_path( - self, - version: &'static str, - path: &'static str, - ) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::CratesOrPath { version, path }, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `source` as source for `wasm-builder`. - pub fn with_wasm_builder_source(self, source: WasmBuilderSource) -> WasmBuilder { - WasmBuilder { - source, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } -} - -/// The builder for building a wasm binary. -/// -/// The builder itself is seperated into multiple structs to make the setup type safe. -/// -/// Building a wasm binary: -/// -/// 1. Call [`WasmBuilder::new`] to create a new builder. -/// 2. Select the project to build using the methods of [`WasmBuilderSelectProject`]. -/// 3. Select the source of the `wasm-builder` crate using the methods of -/// [`WasmBuilderSelectSource`]. -/// 4. Set additional `RUST_FLAGS` or a different name for the file containing the WASM code -/// using methods of [`WasmBuilder`]. -/// 5. Build the WASM binary using [`Self::build`]. -pub struct WasmBuilder { - /// Where should we pull the `wasm-builder` crate from. - source: WasmBuilderSource, - /// Flags that should be appended to `RUST_FLAGS` env variable. - rust_flags: Vec, - /// The name of the file that is being generated in `OUT_DIR`. - /// - /// Defaults to `wasm_binary.rs`. - file_name: Option, - /// The path to the `Cargo.toml` of the project that should be build - /// for wasm. - project_cargo_toml: PathBuf, -} - -impl WasmBuilder { - /// Create a new instance of the builder. - pub fn new() -> WasmBuilderSelectProject { - WasmBuilderSelectProject { - _ignore: (), - } - } - - /// Enable exporting `__heap_base` as global variable in the WASM binary. 
- /// - /// This adds `-Clink-arg=--export=__heap_base` to `RUST_FLAGS`. - pub fn export_heap_base(mut self) -> Self { - self.rust_flags.push("-Clink-arg=--export=__heap_base".into()); - self - } - - /// Set the name of the file that will be generated in `OUT_DIR`. - /// - /// This file needs to be included to get access to the build WASM binary. - /// - /// If this function is not called, `file_name` defaults to `wasm_binary.rs` - pub fn set_file_name(mut self, file_name: impl Into) -> Self { - self.file_name = Some(file_name.into()); - self - } - - /// Instruct the linker to import the memory into the WASM binary. - /// - /// This adds `-C link-arg=--import-memory` to `RUST_FLAGS`. - pub fn import_memory(mut self) -> Self { - self.rust_flags.push("-C link-arg=--import-memory".into()); - self - } - - /// Append the given `flag` to `RUST_FLAGS`. - /// - /// `flag` is appended as is, so it needs to be a valid flag. - pub fn append_to_rust_flags(mut self, flag: impl Into) -> Self { - self.rust_flags.push(flag.into()); - self - } - - /// Build the WASM binary. - pub fn build(self) { - let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!")); - let file_path = out_dir.join(self.file_name.unwrap_or_else(|| "wasm_binary.rs".into())); - - if check_skip_build() { - // If we skip the build, we still want to make sure to be called when an env variable - // changes - generate_rerun_if_changed_instructions(); - - provide_dummy_wasm_binary(&file_path, true); - - return; - } - - // Hash the path to the project cargo toml. - let mut hasher = DefaultHasher::new(); - self.project_cargo_toml.hash(&mut hasher); - - let project_name = env::var("CARGO_PKG_NAME").expect("`CARGO_PKG_NAME` is set by cargo!"); - // Make sure the `wasm-builder-runner` path is unique by concatenating the name of the - // project that is compiling the WASM binary with the hash of the path to the project that - // should be compiled as WASM binary. - let project_folder = get_workspace_root() - .join(format!("{}{}", project_name, hasher.finish())); - - if check_provide_dummy_wasm_binary() { - provide_dummy_wasm_binary(&file_path, false); - } else { - create_project( - &project_folder, - &file_path, - self.source, - &self.project_cargo_toml, - &self.rust_flags.into_iter().map(|f| format!("{} ", f)).collect::(), - ); - run_project(&project_folder); - } - - // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't - // want to spam the output! - generate_rerun_if_changed_instructions(); - } -} - -/// The `wasm-builder` dependency source. -pub enum WasmBuilderSource { - /// The relative path to the source code from the current manifest dir. - Path(&'static str), - /// The git repository that contains the source code. - Git { - repo: &'static str, - rev: &'static str, - }, - /// Use the given version released on crates.io. - Crates(&'static str), - /// Use the given version released on crates.io or from the given path. - CratesOrPath { - version: &'static str, - path: &'static str, - } -} - -impl WasmBuilderSource { - /// Convert to a valid cargo source declaration. - /// - /// `absolute_path` - The manifest dir. 
- fn to_cargo_source(&self, manifest_dir: &Path) -> String { - match self { - WasmBuilderSource::Path(path) => { - replace_back_slashes(format!("path = \"{}\"", manifest_dir.join(path).display())) - } - WasmBuilderSource::Git { repo, rev } => { - format!("git = \"{}\", rev=\"{}\"", repo, rev) - } - WasmBuilderSource::Crates(version) => { - format!("version = \"{}\"", version) - } - WasmBuilderSource::CratesOrPath { version, path } => { - replace_back_slashes( - format!( - "path = \"{}\", version = \"{}\"", - manifest_dir.join(path).display(), - version - ) - ) - } - } - } -} - -/// Build the currently built project as WASM binary and extend `RUSTFLAGS` with the given rustflags. -/// -/// For more information, see [`build_current_project`]. -#[deprecated( - since = "1.0.5", - note = "Please switch to [`WasmBuilder`]", -)] -pub fn build_current_project_with_rustflags( - file_name: &str, - wasm_builder_source: WasmBuilderSource, - default_rust_flags: &str, -) { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_source(wasm_builder_source) - .append_to_rust_flags(default_rust_flags) - .set_file_name(file_name) - .build() -} - -/// Build the currently built project as WASM binary. -/// -/// The current project is determined using the `CARGO_MANIFEST_DIR` environment variable. -/// -/// `file_name` - The name of the file being generated in the `OUT_DIR`. The file contains the -/// constant `WASM_BINARY` which contains the build wasm binary. -/// `wasm_builder_path` - Path to the wasm-builder project, relative to `CARGO_MANIFEST_DIR`. -#[deprecated( - since = "1.0.5", - note = "Please switch to [`WasmBuilder`]", -)] -pub fn build_current_project(file_name: &str, wasm_builder_source: WasmBuilderSource) { - #[allow(deprecated)] - build_current_project_with_rustflags(file_name, wasm_builder_source, ""); -} - -/// Returns the root path of the wasm-builder workspace. -/// -/// The wasm-builder workspace contains all wasm-builder's projects. -fn get_workspace_root() -> PathBuf { - let out_dir_env = env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!"); - let mut out_dir = PathBuf::from(&out_dir_env); - - loop { - match out_dir.parent() { - Some(parent) if out_dir.ends_with("build") => return parent.join("wbuild-runner"), - _ => if !out_dir.pop() { - break; - } - } - } - - panic!("Could not find target dir in: {}", out_dir_env) -} - -fn create_project( - project_folder: &Path, - file_path: &Path, - wasm_builder_source: WasmBuilderSource, - cargo_toml_path: &Path, - default_rustflags: &str, -) { - fs::create_dir_all(project_folder.join("src")) - .expect("WASM build runner dir create can not fail; qed"); - - write_file_if_changed( - project_folder.join("Cargo.toml"), - format!( - r#" - [package] - name = "wasm-build-runner-impl" - version = "1.0.0" - edition = "2018" - - [dependencies] - substrate-wasm-builder = {{ {wasm_builder_source} }} - - [workspace] - "#, - wasm_builder_source = wasm_builder_source.to_cargo_source(&get_manifest_dir()), - ), - ); - - write_file_if_changed( - project_folder.join("src/main.rs"), - format!( - r#" - //! This is automatically generated code by `substrate-wasm-builder`. 
- - use substrate_wasm_builder::build_project_with_default_rustflags; - - fn main() {{ - build_project_with_default_rustflags( - "{file_path}", - "{cargo_toml_path}", - "{default_rustflags}", - ) - }} - "#, - file_path = replace_back_slashes(file_path.display()), - cargo_toml_path = replace_back_slashes(cargo_toml_path.display()), - default_rustflags = default_rustflags, - ), - ); -} - -fn run_project(project_folder: &Path) { - let cargo = env::var("CARGO").expect("`CARGO` env variable is always set when executing `build.rs`."); - let mut cmd = Command::new(cargo); - cmd.arg("run").arg(format!("--manifest-path={}", project_folder.join("Cargo.toml").display())); - - if env::var("DEBUG") != Ok(String::from("true")) { - cmd.arg("--release"); - } - - // Make sure we always run the `wasm-builder` project for the `HOST` architecture. - let host_triple = env::var("HOST").expect("`HOST` is always set when executing `build.rs`."); - cmd.arg(&format!("--target={}", host_triple)); - - // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir exclusive). - // The runner project is created in `CARGO_TARGET_DIR` and executing it will create a sub target - // directory inside of `CARGO_TARGET_DIR`. - cmd.env_remove("CARGO_TARGET_DIR"); - - if !cmd.status().map(|s| s.success()).unwrap_or(false) { - // Don't spam the output with backtraces when a build failed! - process::exit(1); - } -} - -/// Generate the name of the skip build environment variable for the current crate. -fn generate_crate_skip_build_env_name() -> String { - format!( - "SKIP_{}_WASM_BUILD", - env::var("CARGO_PKG_NAME").expect("Package name is set").to_uppercase().replace('-', "_"), - ) -} - -/// Checks if the build of the WASM binary should be skipped. -fn check_skip_build() -> bool { - env::var(SKIP_BUILD_ENV).is_ok() || env::var(generate_crate_skip_build_env_name()).is_ok() -} - -/// Check if we should provide a dummy WASM binary. -fn check_provide_dummy_wasm_binary() -> bool { - env::var(DUMMY_WASM_BINARY_ENV).is_ok() -} - -/// Provide the dummy WASM binary -/// -/// If `skip_build` is `true`, it will only generate the wasm binary if it doesn't exist. -fn provide_dummy_wasm_binary(file_path: &Path, skip_build: bool) { - if !skip_build || !file_path.exists() { - write_file_if_changed( - file_path.into(), - "pub const WASM_BINARY: Option<&[u8]> = None;\ - pub const WASM_BINARY_BLOATY: Option<&[u8]> = None;".into(), - ); - } -} - -/// Generate the `rerun-if-changed` instructions for cargo to make sure that the WASM binary is -/// rebuilt when needed. -fn generate_rerun_if_changed_instructions() { - // Make sure that the `build.rs` is called again if one of the following env variables changes. - println!("cargo:rerun-if-env-changed={}", SKIP_BUILD_ENV); - println!("cargo:rerun-if-env-changed={}", DUMMY_WASM_BINARY_ENV); - println!("cargo:rerun-if-env-changed={}", FORCE_WASM_BUILD_ENV); - println!("cargo:rerun-if-env-changed={}", generate_crate_skip_build_env_name()); -} - -/// Write to the given `file` if the `content` is different. 
-fn write_file_if_changed(file: PathBuf, content: String) { - if fs::read_to_string(&file).ok().as_ref() != Some(&content) { - fs::write(&file, content).unwrap_or_else(|_| panic!("Writing `{}` can not fail!", file.display())); - } -} diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index e9dd1a97b89e4..c9d165ce8a140 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-wasm-builder" -version = "2.0.1" +version = "4.0.0" authors = ["Parity Technologies "] description = "Utility for building WASM binaries" edition = "2018" @@ -14,12 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] build-helper = "0.1.1" -cargo_metadata = "0.10.0" +cargo_metadata = "0.12.0" tempfile = "3.1.0" toml = "0.5.4" walkdir = "2.3.1" -fs2 = "0.4.3" wasm-gc-api = "0.1.11" atty = "0.2.13" -itertools = "0.8.2" ansi_term = "0.12.1" diff --git a/utils/wasm-builder/README.md b/utils/wasm-builder/README.md index 1e24d2cebab32..3868faf1acab5 100644 --- a/utils/wasm-builder/README.md +++ b/utils/wasm-builder/README.md @@ -8,20 +8,23 @@ The Wasm builder is a tool that integrates the process of building the WASM bina A project that should be compiled as a Wasm binary needs to: 1. Add a `build.rs` file. -2. Add `substrate-wasm-builder` as dependency into `build-dependencies`. +2. Add `wasm-builder` as dependency into `build-dependencies`. The `build.rs` file needs to contain the following code: ```rust -use wasm_builder_runner::{build_current_project, WasmBuilderSource}; +use substrate_wasm_builder::WasmBuilder; fn main() { - build_current_project( - // The name of the file being generated in out-dir. - "wasm_binary.rs", - // How to include wasm-builder, in this case from crates.io. - WasmBuilderSource::Crates("1.0.0"), - ); + WasmBuilder::new() + // Tell the builder to build the project (crate) this `build.rs` is part of. + .with_current_project() + // Make sure to export the `heap_base` global, this is required by Substrate + .export_heap_base() + // Build the Wasm file so that it imports the memory (need to be provided by at instantiation) + .import_memory() + // Build it. + .build() } ``` @@ -32,9 +35,10 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); ``` This will include the generated Wasm binary as two constants `WASM_BINARY` and `WASM_BINARY_BLOATY`. -The former is a compact Wasm binary and the latter is not compacted. +The former is a compact Wasm binary and the latter is the Wasm binary as being generated by the compiler. +Both variables have `Option<&'static [u8]>` as type. -### Feature +### Features Wasm builder supports to enable cargo features while building the Wasm binary. By default it will enable all features in the wasm build that are enabled for the native build except the @@ -46,19 +50,19 @@ Wasm binary. If this feature is not present, it will not be enabled. By using environment variables, you can configure which Wasm binaries are built and how: -- `SKIP_WASM_BUILD` - Skips building any wasm binary. This is useful when only native should be recompiled. -- `BUILD_DUMMY_WASM_BINARY` - Builds dummy wasm binaries. These dummy binaries are empty and useful - for `cargo check` runs. -- `WASM_BUILD_TYPE` - Sets the build type for building wasm binaries. Supported values are `release` or `debug`. +- `SKIP_WASM_BUILD` - Skips building any Wasm binary. This is useful when only native should be recompiled. 
+ If this is the first run and there doesn't exist a Wasm binary, this will set both + variables to `None`. +- `WASM_BUILD_TYPE` - Sets the build type for building Wasm binaries. Supported values are `release` or `debug`. By default the build type is equal to the build type used by the main build. -- `FORCE_WASM_BUILD` - Can be set to force a wasm build. On subsequent calls the value of the variable - needs to change. As wasm builder instructs `cargo` to watch for file changes +- `FORCE_WASM_BUILD` - Can be set to force a Wasm build. On subsequent calls the value of the variable + needs to change. As wasm-builder instructs `cargo` to watch for file changes this environment variable should only be required in certain circumstances. - `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. - `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. -- `WASM_TARGET_DIRECTORY` - Will copy any build wasm binary to the given directory. The path needs +- `WASM_TARGET_DIRECTORY` - Will copy any build Wasm binary to the given directory. The path needs to be absolute. -- `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the wasm binaries. The +- `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. diff --git a/utils/wasm-builder/src/builder.rs b/utils/wasm-builder/src/builder.rs new file mode 100644 index 0000000000000..8ef6c95324c78 --- /dev/null +++ b/utils/wasm-builder/src/builder.rs @@ -0,0 +1,245 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{env, path::{PathBuf, Path}, process}; + +/// Returns the manifest dir from the `CARGO_MANIFEST_DIR` env. +fn get_manifest_dir() -> PathBuf { + env::var("CARGO_MANIFEST_DIR") + .expect("`CARGO_MANIFEST_DIR` is always set for `build.rs` files; qed") + .into() +} + +/// First step of the [`WasmBuilder`] to select the project to build. +pub struct WasmBuilderSelectProject { + /// This parameter just exists to make it impossible to construct + /// this type outside of this crate. + _ignore: (), +} + +impl WasmBuilderSelectProject { + /// Use the current project as project for building the WASM binary. + /// + /// # Panics + /// + /// Panics if the `CARGO_MANIFEST_DIR` variable is not set. This variable + /// is always set by `Cargo` in `build.rs` files. + pub fn with_current_project(self) -> WasmBuilder { + WasmBuilder { + rust_flags: Vec::new(), + file_name: None, + project_cargo_toml: get_manifest_dir().join("Cargo.toml"), + } + } + + /// Use the given `path` as project for building the WASM binary. + /// + /// Returns an error if the given `path` does not points to a `Cargo.toml`. 
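For reference, a minimal `build.rs` sketch using the new builder API documented above. The `../runtime/Cargo.toml` path and the `runtime_wasm.rs` output name are illustrative only, not values taken from this diff:

```rust
// build.rs — sketch only. Assumes a sibling `runtime` crate; adjust the path as needed.
use substrate_wasm_builder::WasmBuilder;

fn main() {
    WasmBuilder::new()
        // Build an explicit crate instead of the one this `build.rs` belongs to.
        .with_project("../runtime/Cargo.toml")
        .expect("path must point to an existing `Cargo.toml`")
        // Required by Substrate runtimes: export `__heap_base` and import the memory.
        .export_heap_base()
        .import_memory()
        // Write the generated constants to `$OUT_DIR/runtime_wasm.rs` instead of the default.
        .set_file_name("runtime_wasm.rs")
        .build()
}
```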
+ pub fn with_project( + self, + path: impl Into<PathBuf>, + ) -> Result<WasmBuilder, &'static str> { + let path = path.into(); + + if path.ends_with("Cargo.toml") && path.exists() { + Ok(WasmBuilder { + rust_flags: Vec::new(), + file_name: None, + project_cargo_toml: path, + }) + } else { + Err("Project path must point to the `Cargo.toml` of the project") + } + } +} + +/// The builder for building a wasm binary. +/// +/// The builder itself is separated into multiple structs to make the setup type safe. +/// +/// Building a wasm binary: +/// +/// 1. Call [`WasmBuilder::new`] to create a new builder. +/// 2. Select the project to build using the methods of [`WasmBuilderSelectProject`]. +/// 3. Set additional `RUST_FLAGS` or a different name for the file containing the WASM code +/// using methods of [`WasmBuilder`]. +/// 4. Build the WASM binary using [`Self::build`]. +pub struct WasmBuilder { + /// Flags that should be appended to `RUST_FLAGS` env variable. + rust_flags: Vec<String>, + /// The name of the file that is being generated in `OUT_DIR`. + /// + /// Defaults to `wasm_binary.rs`. + file_name: Option<String>, + /// The path to the `Cargo.toml` of the project that should be built + /// for wasm. + project_cargo_toml: PathBuf, +} + +impl WasmBuilder { + /// Create a new instance of the builder. + pub fn new() -> WasmBuilderSelectProject { + WasmBuilderSelectProject { + _ignore: (), + } + } + + /// Enable exporting `__heap_base` as global variable in the WASM binary. + /// + /// This adds `-Clink-arg=--export=__heap_base` to `RUST_FLAGS`. + pub fn export_heap_base(mut self) -> Self { + self.rust_flags.push("-Clink-arg=--export=__heap_base".into()); + self + } + + /// Set the name of the file that will be generated in `OUT_DIR`. + /// + /// This file needs to be included to get access to the build WASM binary. + /// + /// If this function is not called, `file_name` defaults to `wasm_binary.rs` + pub fn set_file_name(mut self, file_name: impl Into<String>) -> Self { + self.file_name = Some(file_name.into()); + self + } + + /// Instruct the linker to import the memory into the WASM binary. + /// + /// This adds `-C link-arg=--import-memory` to `RUST_FLAGS`. + pub fn import_memory(mut self) -> Self { + self.rust_flags.push("-C link-arg=--import-memory".into()); + self + } + + /// Append the given `flag` to `RUST_FLAGS`. + /// + /// `flag` is appended as is, so it needs to be a valid flag. + pub fn append_to_rust_flags(mut self, flag: impl Into<String>) -> Self { + self.rust_flags.push(flag.into()); + self + } + + /// Build the WASM binary. + pub fn build(self) { + let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!")); + let file_path = out_dir.join(self.file_name.unwrap_or_else(|| "wasm_binary.rs".into())); + + if check_skip_build() { + // If we skip the build, we still want to make sure to be called when an env variable + // changes + generate_rerun_if_changed_instructions(); + + provide_dummy_wasm_binary_if_not_exist(&file_path); + + return; + } + + build_project( + file_path, + self.project_cargo_toml, + self.rust_flags.into_iter().map(|f| format!("{} ", f)).collect(), + ); + + // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't + // want to spam the output! + generate_rerun_if_changed_instructions(); + } +} + +/// Generate the name of the skip build environment variable for the current crate.
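A standalone sketch of the naming rule referenced above (the crate name is an example, and the real code reads it from `CARGO_PKG_NAME`): a crate called `node-runtime` is skipped via `SKIP_NODE_RUNTIME_WASM_BUILD`.

```rust
// Sketch: derive the per-crate skip variable from a package name.
fn skip_env_for(pkg_name: &str) -> String {
    format!("SKIP_{}_WASM_BUILD", pkg_name.to_uppercase().replace('-', "_"))
}

fn main() {
    assert_eq!(skip_env_for("node-runtime"), "SKIP_NODE_RUNTIME_WASM_BUILD");
}
```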
+fn generate_crate_skip_build_env_name() -> String { + format!( + "SKIP_{}_WASM_BUILD", + env::var("CARGO_PKG_NAME").expect("Package name is set").to_uppercase().replace('-', "_"), + ) +} + +/// Checks if the build of the WASM binary should be skipped. +fn check_skip_build() -> bool { + env::var(crate::SKIP_BUILD_ENV).is_ok() || env::var(generate_crate_skip_build_env_name()).is_ok() +} + +/// Provide a dummy WASM binary if there doesn't exist one. +fn provide_dummy_wasm_binary_if_not_exist(file_path: &Path) { + if !file_path.exists() { + crate::write_file_if_changed( + file_path, + "pub const WASM_BINARY: Option<&[u8]> = None;\ + pub const WASM_BINARY_BLOATY: Option<&[u8]> = None;", + ); + } +} + +/// Generate the `rerun-if-changed` instructions for cargo to make sure that the WASM binary is +/// rebuilt when needed. +fn generate_rerun_if_changed_instructions() { + // Make sure that the `build.rs` is called again if one of the following env variables changes. + println!("cargo:rerun-if-env-changed={}", crate::SKIP_BUILD_ENV); + println!("cargo:rerun-if-env-changed={}", crate::FORCE_WASM_BUILD_ENV); + println!("cargo:rerun-if-env-changed={}", generate_crate_skip_build_env_name()); +} + +/// Build the currently built project as wasm binary. +/// +/// The current project is determined by using the `CARGO_MANIFEST_DIR` environment variable. +/// +/// `file_name` - The name + path of the file being generated. The file contains the +/// constant `WASM_BINARY`, which contains the built WASM binary. +/// `project_cargo_toml` - The path to the `Cargo.toml` of the project that should be built. +/// `default_rustflags` - Default `RUSTFLAGS` that will always be set for the build. +fn build_project( + file_name: PathBuf, + project_cargo_toml: PathBuf, + default_rustflags: String, +) { + let cargo_cmd = match crate::prerequisites::check() { + Ok(cmd) => cmd, + Err(err_msg) => { + eprintln!("{}", err_msg); + process::exit(1); + }, + }; + + let (wasm_binary, bloaty) = crate::wasm_project::create_and_compile( + &project_cargo_toml, + &default_rustflags, + cargo_cmd, + ); + + let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { + ( + wasm_binary.wasm_binary_path_escaped(), + bloaty.wasm_binary_bloaty_path_escaped(), + ) + } else { + ( + bloaty.wasm_binary_bloaty_path_escaped(), + bloaty.wasm_binary_bloaty_path_escaped(), + ) + }; + + crate::write_file_if_changed( + file_name, + format!( + r#" + pub const WASM_BINARY: Option<&[u8]> = Some(include_bytes!("{wasm_binary}")); + pub const WASM_BINARY_BLOATY: Option<&[u8]> = Some(include_bytes!("{wasm_binary_bloaty}")); + "#, + wasm_binary = wasm_binary, + wasm_binary_bloaty = wasm_binary_bloaty, + ), + ); +} diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs index aa63e9596e190..0a3c856344dcd 100644 --- a/utils/wasm-builder/src/lib.rs +++ b/utils/wasm-builder/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,20 +25,23 @@ //! A project that should be compiled as a Wasm binary needs to: //! //! 1. Add a `build.rs` file. -//! 2. Add `substrate-wasm-builder` as dependency into `build-dependencies`. +//! 2. Add `wasm-builder` as dependency into `build-dependencies`. //! //! The `build.rs` file needs to contain the following code: //! -//! ```ignore -//! 
use wasm_builder_runner::{build_current_project, WasmBuilderSource}; +//! ```no_run +//! use substrate_wasm_builder::WasmBuilder; //! //! fn main() { -//! build_current_project( -//! // The name of the file being generated in out-dir. -//! "wasm_binary.rs", -//! // How to include wasm-builder, in this case from crates.io. -//! WasmBuilderSource::Crates("1.0.0"), -//! ); +//! WasmBuilder::new() +//! // Tell the builder to build the project (crate) this `build.rs` is part of. +//! .with_current_project() +//! // Make sure to export the `heap_base` global, this is required by Substrate +//! .export_heap_base() +//! // Build the Wasm file so that it imports the memory (need to be provided by at instantiation) +//! .import_memory() +//! // Build it. +//! .build() //! } //! ``` //! @@ -49,7 +52,8 @@ //! ``` //! //! This will include the generated Wasm binary as two constants `WASM_BINARY` and `WASM_BINARY_BLOATY`. -//! The former is a compact Wasm binary and the latter is not compacted. +//! The former is a compact Wasm binary and the latter is the Wasm binary as being generated by the compiler. +//! Both variables have `Option<&'static [u8]>` as type. //! //! ### Feature //! @@ -63,19 +67,19 @@ //! //! By using environment variables, you can configure which Wasm binaries are built and how: //! -//! - `SKIP_WASM_BUILD` - Skips building any wasm binary. This is useful when only native should be recompiled. -//! - `BUILD_DUMMY_WASM_BINARY` - Builds dummy wasm binaries. These dummy binaries are empty and useful -//! for `cargo check` runs. -//! - `WASM_BUILD_TYPE` - Sets the build type for building wasm binaries. Supported values are `release` or `debug`. +//! - `SKIP_WASM_BUILD` - Skips building any Wasm binary. This is useful when only native should be recompiled. +//! If this is the first run and there doesn't exist a Wasm binary, this will set both +//! variables to `None`. +//! - `WASM_BUILD_TYPE` - Sets the build type for building Wasm binaries. Supported values are `release` or `debug`. //! By default the build type is equal to the build type used by the main build. -//! - `FORCE_WASM_BUILD` - Can be set to force a wasm build. On subsequent calls the value of the variable -//! needs to change. As wasm builder instructs `cargo` to watch for file changes +//! - `FORCE_WASM_BUILD` - Can be set to force a Wasm build. On subsequent calls the value of the variable +//! needs to change. As wasm-builder instructs `cargo` to watch for file changes //! this environment variable should only be required in certain circumstances. //! - `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. //! - `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. -//! - `WASM_TARGET_DIRECTORY` - Will copy any build wasm binary to the given directory. The path needs +//! - `WASM_TARGET_DIRECTORY` - Will copy any build Wasm binary to the given directory. The path needs //! to be absolute. -//! - `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the wasm binaries. The +//! - `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The //! format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. //! //! Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. @@ -92,11 +96,14 @@ //! as well. For example if installing the rust nightly from 20.02.2020 using `rustup install nightly-2020-02-20`, //! 
the wasm target needs to be installed as well `rustup target add wasm32-unknown-unknown --toolchain nightly-2020-02-20`. -use std::{env, fs, path::{PathBuf, Path}, process::{Command, self}, io::BufRead}; +use std::{env, fs, path::{PathBuf, Path}, process::Command, io::BufRead}; +mod builder; mod prerequisites; mod wasm_project; +pub use builder::{WasmBuilder, WasmBuilderSelectProject}; + /// Environment variable that tells us to skip building the wasm binary. const SKIP_BUILD_ENV: &str = "SKIP_WASM_BUILD"; @@ -120,87 +127,8 @@ const WASM_BUILD_NO_COLOR: &str = "WASM_BUILD_NO_COLOR"; /// Environment variable to set the toolchain used to compile the wasm binary. const WASM_BUILD_TOOLCHAIN: &str = "WASM_BUILD_TOOLCHAIN"; -/// Build the currently built project as wasm binary. -/// -/// The current project is determined by using the `CARGO_MANIFEST_DIR` environment variable. -/// -/// `file_name` - The name + path of the file being generated. The file contains the -/// constant `WASM_BINARY`, which contains the built WASM binary. -/// `cargo_manifest` - The path to the `Cargo.toml` of the project that should be built. -pub fn build_project(file_name: &str, cargo_manifest: &str) { - build_project_with_default_rustflags(file_name, cargo_manifest, ""); -} - -/// Build the currently built project as wasm binary. -/// -/// The current project is determined by using the `CARGO_MANIFEST_DIR` environment variable. -/// -/// `file_name` - The name + path of the file being generated. The file contains the -/// constant `WASM_BINARY`, which contains the built WASM binary. -/// `cargo_manifest` - The path to the `Cargo.toml` of the project that should be built. -/// `default_rustflags` - Default `RUSTFLAGS` that will always be set for the build. -pub fn build_project_with_default_rustflags( - file_name: &str, - cargo_manifest: &str, - default_rustflags: &str, -) { - if check_skip_build() { - return; - } - - let cargo_manifest = PathBuf::from(cargo_manifest); - - if !cargo_manifest.exists() { - panic!("'{}' does not exist!", cargo_manifest.display()); - } - - if !cargo_manifest.ends_with("Cargo.toml") { - panic!("'{}' no valid path to a `Cargo.toml`!", cargo_manifest.display()); - } - - let cargo_cmd = match prerequisites::check() { - Ok(cmd) => cmd, - Err(err_msg) => { - eprintln!("{}", err_msg); - process::exit(1); - }, - }; - - let (wasm_binary, bloaty) = wasm_project::create_and_compile( - &cargo_manifest, - default_rustflags, - cargo_cmd, - ); - - let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { - ( - wasm_binary.wasm_binary_path_escaped(), - bloaty.wasm_binary_bloaty_path_escaped(), - ) - } else { - ( - bloaty.wasm_binary_bloaty_path_escaped(), - bloaty.wasm_binary_bloaty_path_escaped(), - ) - }; - - write_file_if_changed( - file_name, - format!( - r#" - pub const WASM_BINARY: Option<&[u8]> = Some(include_bytes!("{wasm_binary}")); - pub const WASM_BINARY_BLOATY: Option<&[u8]> = Some(include_bytes!("{wasm_binary_bloaty}")); - "#, - wasm_binary = wasm_binary, - wasm_binary_bloaty = wasm_binary_bloaty, - ), - ); -} - -/// Checks if the build of the WASM binary should be skipped. -fn check_skip_build() -> bool { - env::var(SKIP_BUILD_ENV).is_ok() -} +/// Environment variable that makes sure the WASM build is triggered. +const FORCE_WASM_BUILD_ENV: &str = "FORCE_WASM_BUILD"; /// Write to the given `file` if the `content` is different. 
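To illustrate how the `WASM_BINARY`/`WASM_BINARY_BLOATY` constants written by the builder are typically consumed, here is a hedged sketch; the `wasm_binary_unwrap` helper name is illustrative, and the snippet only compiles inside a crate whose build script generated `wasm_binary.rs`:

```rust
// Sketch: include the generated file and fail loudly if the binary was skipped.
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));

pub fn wasm_binary_unwrap() -> &'static [u8] {
    // `WASM_BINARY` is `None` when the build ran with `SKIP_WASM_BUILD` and no
    // previously generated binary existed.
    WASM_BINARY.expect("wasm binary was not built; rebuild without `SKIP_WASM_BUILD`")
}
```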
fn write_file_if_changed(file: impl AsRef, content: impl AsRef) { @@ -217,7 +145,9 @@ fn copy_file_if_changed(src: PathBuf, dst: PathBuf) { if src_file != dst_file { fs::copy(&src, &dst) - .unwrap_or_else(|_| panic!("Copying `{}` to `{}` can not fail; qed", src.display(), dst.display())); + .unwrap_or_else( + |_| panic!("Copying `{}` to `{}` can not fail; qed", src.display(), dst.display()) + ); } } diff --git a/utils/wasm-builder/src/prerequisites.rs b/utils/wasm-builder/src/prerequisites.rs index 3df2707d1d441..5dedcc4641a72 100644 --- a/utils/wasm-builder/src/prerequisites.rs +++ b/utils/wasm-builder/src/prerequisites.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index c27af71988b07..73dc2e13af348 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,10 +30,6 @@ use cargo_metadata::{MetadataCommand, Metadata}; use walkdir::WalkDir; -use fs2::FileExt; - -use itertools::Itertools; - /// Colorize an info message. /// /// Returns the colorized message. @@ -70,31 +66,6 @@ impl WasmBinary { } } -/// A lock for the WASM workspace. -struct WorkspaceLock(fs::File); - -impl WorkspaceLock { - /// Create a new lock - fn new(wasm_workspace_root: &Path) -> Self { - let lock = fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(wasm_workspace_root.join("wasm_workspace.lock")) - .expect("Opening the lock file does not fail"); - - lock.lock_exclusive().expect("Locking `wasm_workspace.lock` failed"); - - WorkspaceLock(lock) - } -} - -impl Drop for WorkspaceLock { - fn drop(&mut self) { - let _ = self.0.unlock(); - } -} - fn crate_metadata(cargo_manifest: &Path) -> Metadata { let mut cargo_lock = cargo_manifest.to_path_buf(); cargo_lock.set_file_name("Cargo.lock"); @@ -120,35 +91,36 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { /// Creates the WASM project, compiles the WASM binary and compacts the WASM binary. /// /// # Returns +/// /// The path to the compact WASM binary and the bloaty WASM binary. 
pub(crate) fn create_and_compile( - cargo_manifest: &Path, + project_cargo_toml: &Path, default_rustflags: &str, cargo_cmd: CargoCommandVersioned, ) -> (Option, WasmBinaryBloaty) { let wasm_workspace_root = get_wasm_workspace_root(); let wasm_workspace = wasm_workspace_root.join("wbuild"); - // Lock the workspace exclusively for us - let _lock = WorkspaceLock::new(&wasm_workspace_root); + let crate_metadata = crate_metadata(project_cargo_toml); - let crate_metadata = crate_metadata(cargo_manifest); - - let project = create_project(cargo_manifest, &wasm_workspace, &crate_metadata); - create_wasm_workspace_project(&wasm_workspace, &crate_metadata.workspace_root); + let project = create_project( + project_cargo_toml, + &wasm_workspace, + &crate_metadata, + &crate_metadata.workspace_root, + ); build_project(&project, default_rustflags, cargo_cmd); let (wasm_binary, bloaty) = compact_wasm_file( &project, - cargo_manifest, - &wasm_workspace, + project_cargo_toml, ); wasm_binary.as_ref().map(|wasm_binary| - copy_wasm_to_target_directory(cargo_manifest, wasm_binary) + copy_wasm_to_target_directory(project_cargo_toml, wasm_binary) ); - generate_rerun_if_changed_instructions(cargo_manifest, &project, &wasm_workspace); + generate_rerun_if_changed_instructions(project_cargo_toml, &project, &wasm_workspace); (wasm_binary, bloaty) } @@ -221,69 +193,14 @@ fn get_wasm_workspace_root() -> PathBuf { panic!("Could not find target dir in: {}", build_helper::out_dir().display()) } -/// Find all workspace members. -/// -/// Each folder in `wasm_workspace` is seen as a member of the workspace. Exceptions are -/// folders starting with "." and the "target" folder. -/// -/// Every workspace member that is not valid anymore is deleted (the folder of it). A -/// member is not valid anymore when the `wasm-project` dependency points to an non-existing -/// folder or the package name is not valid. -fn find_and_clear_workspace_members(wasm_workspace: &Path) -> Vec { - let mut members = WalkDir::new(wasm_workspace) - .min_depth(1) - .max_depth(1) - .into_iter() - .filter_map(|p| p.ok()) - .map(|d| d.into_path()) - .filter(|p| p.is_dir()) - .filter_map(|p| p.file_name().map(|f| f.to_owned()).and_then(|s| s.into_string().ok())) - .filter(|f| !f.starts_with('.') && f != "target") - .collect::>(); - - let mut i = 0; - while i != members.len() { - let path = wasm_workspace.join(&members[i]).join("Cargo.toml"); - - // Extract the `wasm-project` dependency. - // If the path can be extracted and is valid and the package name matches, - // the member is valid. - if let Some(mut wasm_project) = fs::read_to_string(path) - .ok() - .and_then(|s| toml::from_str::(&s).ok()) - .and_then(|mut t| t.remove("dependencies")) - .and_then(|p| p.try_into::
<Table>().ok()) - .and_then(|mut t| t.remove("wasm_project")) - .and_then(|p| p.try_into::<Table>
().ok()) - { - if let Some(path) = wasm_project.remove("path") - .and_then(|p| p.try_into::().ok()) - { - if let Some(name) = wasm_project.remove("package") - .and_then(|p| p.try_into::().ok()) - { - let path = PathBuf::from(path); - if path.exists() { - if name == get_crate_name(&path.join("Cargo.toml")) { - i += 1; - continue - } - } - } - } - } - - fs::remove_dir_all(wasm_workspace.join(&members[i])) - .expect("Removing invalid workspace member can not fail; qed"); - members.remove(i); - } - - members -} - -fn create_wasm_workspace_project(wasm_workspace: &Path, workspace_root_path: &Path) { - let members = find_and_clear_workspace_members(wasm_workspace); - +fn create_project_cargo_toml( + wasm_workspace: &Path, + workspace_root_path: &Path, + crate_name: &str, + crate_path: &Path, + wasm_binary: &str, + enabled_features: &[String], +) { let mut workspace_toml: Table = toml::from_str( &fs::read_to_string( workspace_root_path.join("Cargo.toml"), @@ -306,12 +223,6 @@ fn create_wasm_workspace_project(wasm_workspace: &Path, workspace_root_path: &Pa wasm_workspace_toml.insert("profile".into(), profile.into()); - // Add `workspace` with members - let mut workspace = Table::new(); - workspace.insert("members".into(), members.into()); - - wasm_workspace_toml.insert("workspace".into(), workspace.into()); - // Add patch section from the project root `Cargo.toml` if let Some(mut patch) = workspace_toml.remove("patch").and_then(|p| p.try_into::
().ok()) { // Iterate over all patches and make the patch path absolute from the workspace root path. @@ -335,6 +246,33 @@ fn create_wasm_workspace_project(wasm_workspace: &Path, workspace_root_path: &Pa wasm_workspace_toml.insert("patch".into(), patch.into()); } + let mut package = Table::new(); + package.insert("name".into(), format!("{}-wasm", crate_name).into()); + package.insert("version".into(), "1.0.0".into()); + package.insert("edition".into(), "2018".into()); + + wasm_workspace_toml.insert("package".into(), package.into()); + + let mut lib = Table::new(); + lib.insert("name".into(), wasm_binary.into()); + lib.insert("crate-type".into(), vec!["cdylib".to_string()].into()); + + wasm_workspace_toml.insert("lib".into(), lib.into()); + + let mut dependencies = Table::new(); + + let mut wasm_project = Table::new(); + wasm_project.insert("package".into(), crate_name.into()); + wasm_project.insert("path".into(), crate_path.display().to_string().into()); + wasm_project.insert("default-features".into(), false.into()); + wasm_project.insert("features".into(), enabled_features.to_vec().into()); + + dependencies.insert("wasm-project".into(), wasm_project.into()); + + wasm_workspace_toml.insert("dependencies".into(), dependencies.into()); + + wasm_workspace_toml.insert("workspace".into(), Table::new().into()); + write_file_if_changed( wasm_workspace.join("Cargo.toml"), toml::to_string_pretty(&wasm_workspace_toml).expect("Wasm workspace toml is valid; qed"), @@ -394,56 +332,48 @@ fn has_runtime_wasm_feature_declared( /// Create the project used to build the wasm binary. /// /// # Returns -/// The path to the created project. -fn create_project(cargo_manifest: &Path, wasm_workspace: &Path, crate_metadata: &Metadata) -> PathBuf { - let crate_name = get_crate_name(cargo_manifest); - let crate_path = cargo_manifest.parent().expect("Parent path exists; qed"); - let wasm_binary = get_wasm_binary_name(cargo_manifest); - let project_folder = wasm_workspace.join(&crate_name); - - fs::create_dir_all(project_folder.join("src")) +/// +/// The path to the created wasm project. 
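Roughly, the manifest that `create_project_cargo_toml` writes for a crate named `node-runtime` contains a `[package]`, a `cdylib` `[lib]`, a single `wasm-project` dependency pointing back at the real crate, and an empty `[workspace]` that detaches the generated project from the outer workspace. A sketch using the same `toml` crate (all names, paths and versions below are made up for illustration):

```rust
use toml::value::Table;

fn main() {
    let mut package = Table::new();
    package.insert("name".into(), "node-runtime-wasm".into());
    package.insert("version".into(), "1.0.0".into());
    package.insert("edition".into(), "2018".into());

    let mut lib = Table::new();
    lib.insert("name".into(), "node_runtime".into());
    lib.insert("crate-type".into(), vec!["cdylib".to_string()].into());

    let mut wasm_project = Table::new();
    wasm_project.insert("package".into(), "node-runtime".into());
    wasm_project.insert("path".into(), "/path/to/runtime".into());
    wasm_project.insert("default-features".into(), false.into());

    let mut dependencies = Table::new();
    dependencies.insert("wasm-project".into(), wasm_project.into());

    let mut manifest = Table::new();
    manifest.insert("package".into(), package.into());
    manifest.insert("lib".into(), lib.into());
    manifest.insert("dependencies".into(), dependencies.into());
    // Detach the generated project from any enclosing cargo workspace.
    manifest.insert("workspace".into(), Table::new().into());

    println!("{}", toml::to_string_pretty(&manifest).unwrap());
}
```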
+fn create_project( + project_cargo_toml: &Path, + wasm_workspace: &Path, + crate_metadata: &Metadata, + workspace_root_path: &Path, +) -> PathBuf { + let crate_name = get_crate_name(project_cargo_toml); + let crate_path = project_cargo_toml.parent().expect("Parent path exists; qed"); + let wasm_binary = get_wasm_binary_name(project_cargo_toml); + let wasm_project_folder = wasm_workspace.join(&crate_name); + + fs::create_dir_all(wasm_project_folder.join("src")) .expect("Wasm project dir create can not fail; qed"); - let mut enabled_features = project_enabled_features(&cargo_manifest, &crate_metadata); + let mut enabled_features = project_enabled_features(&project_cargo_toml, &crate_metadata); - if has_runtime_wasm_feature_declared(cargo_manifest, crate_metadata) { + if has_runtime_wasm_feature_declared(project_cargo_toml, crate_metadata) { enabled_features.push("runtime-wasm".into()); } - write_file_if_changed( - project_folder.join("Cargo.toml"), - format!( - r#" - [package] - name = "{crate_name}-wasm" - version = "1.0.0" - edition = "2018" - - [lib] - name = "{wasm_binary}" - crate-type = ["cdylib"] - - [dependencies] - wasm_project = {{ package = "{crate_name}", path = "{crate_path}", default-features = false, features = [ {features} ] }} - "#, - crate_name = crate_name, - crate_path = crate_path.display(), - wasm_binary = wasm_binary, - features = enabled_features.into_iter().map(|f| format!("\"{}\"", f)).join(","), - ) + create_project_cargo_toml( + &wasm_project_folder, + workspace_root_path, + &crate_name, + &crate_path, + &wasm_binary, + &enabled_features, ); write_file_if_changed( - project_folder.join("src/lib.rs"), + wasm_project_folder.join("src/lib.rs"), "#![no_std] pub use wasm_project::*;", ); - if let Some(crate_lock_file) = find_cargo_lock(cargo_manifest) { + if let Some(crate_lock_file) = find_cargo_lock(project_cargo_toml) { // Use the `Cargo.lock` of the main project. - crate::copy_file_if_changed(crate_lock_file, wasm_workspace.join("Cargo.lock")); + crate::copy_file_if_changed(crate_lock_file, wasm_project_folder.join("Cargo.lock")); } - project_folder + wasm_project_folder } /// Returns if the project should be built as a release. @@ -474,9 +404,13 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman env::var(crate::WASM_BUILD_RUSTFLAGS_ENV).unwrap_or_default(), ); - build_cmd.args(&["rustc", "--target=wasm32-unknown-unknown"]) + build_cmd.args(&["-Zfeatures=build_dep", "rustc", "--target=wasm32-unknown-unknown"]) .arg(format!("--manifest-path={}", manifest_path.display())) .env("RUSTFLAGS", rustflags) + // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir exclusive). + // The runner project is created in `CARGO_TARGET_DIR` and executing it will create a sub target + // directory inside of `CARGO_TARGET_DIR`. 
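The net effect of `build_project` is roughly the following cargo invocation; the paths, flags and manifest location below are illustrative, not the exact values wasm-builder computes:

```rust
use std::process::Command;

fn main() {
    let status = Command::new("cargo")
        .args(&["rustc", "--release", "--target=wasm32-unknown-unknown"])
        .arg("--manifest-path=/tmp/wbuild/node-runtime/Cargo.toml")
        .env("RUSTFLAGS", "-Clink-arg=--export=__heap_base -C link-arg=--import-memory")
        // Drop the outer `CARGO_TARGET_DIR`: the parent cargo process holds an
        // exclusive lock on it, so reusing it from here would deadlock.
        .env_remove("CARGO_TARGET_DIR")
        .status()
        .expect("failed to spawn cargo");
    println!("wasm build finished with: {}", status);
}
```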
+ .env_remove("CARGO_TARGET_DIR") // We don't want to call ourselves recursively .env(crate::SKIP_BUILD_ENV, ""); @@ -503,14 +437,14 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman fn compact_wasm_file( project: &Path, cargo_manifest: &Path, - wasm_workspace: &Path, ) -> (Option, WasmBinaryBloaty) { let is_release_build = is_release_build(); let target = if is_release_build { "release" } else { "debug" }; let wasm_binary = get_wasm_binary_name(cargo_manifest); - let wasm_file = wasm_workspace.join("target/wasm32-unknown-unknown") + let wasm_file = project.join("target/wasm32-unknown-unknown") .join(target) .join(format!("{}.wasm", wasm_binary)); + let wasm_compact_file = if is_release_build { let wasm_compact_file = project.join(format!("{}.compact.wasm", wasm_binary)); wasm_gc::garbage_collect_file(&wasm_file, &wasm_compact_file)

| P::from_parts(i.saturated_into::())) + .collect()) } } diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index 035a704ba3009..caaa4c33cd431 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,6 +22,7 @@ use sp_std::{ops, fmt, prelude::*, convert::TryInto}; use codec::{Encode, CompactAs}; use crate::traits::{ SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, Unsigned, + One, }; use sp_debug_derive::RuntimeDebug; @@ -37,13 +38,17 @@ pub trait PerThing: Sized + Saturating + Copy + Default + Eq + PartialEq + Ord + PartialOrd + Bounded + fmt::Debug { /// The data type used to build this per-thingy. - type Inner: BaseArithmetic + Unsigned + Copy + fmt::Debug; + type Inner: BaseArithmetic + Unsigned + Copy + Into + fmt::Debug; /// A data type larger than `Self::Inner`, used to avoid overflow in some computations. /// It must be able to compute `ACCURACY^2`. - type Upper: - BaseArithmetic + Copy + From + TryInto + - UniqueSaturatedInto + Unsigned + fmt::Debug; + type Upper: BaseArithmetic + + Copy + + From + + TryInto + + UniqueSaturatedInto + + Unsigned + + fmt::Debug; /// The accuracy of this type. const ACCURACY: Self::Inner; @@ -61,20 +66,18 @@ pub trait PerThing: fn is_one(&self) -> bool { self.deconstruct() == Self::ACCURACY } /// Build this type from a percent. Equivalent to `Self::from_parts(x * Self::ACCURACY / 100)` - /// but more accurate. + /// but more accurate and can cope with potential type overflows. fn from_percent(x: Self::Inner) -> Self { - let a = x.min(100.into()); - let b = Self::ACCURACY; - // if Self::ACCURACY % 100 > 0 then we need the correction for accuracy - let c = rational_mul_correction::(b, a, 100.into(), Rounding::Nearest); - Self::from_parts(a / 100.into() * b + c) + let a: Self::Inner = x.min(100.into()); + let b: Self::Inner = 100.into(); + Self::from_rational_approximation::(a, b) } /// Return the product of multiplication of this value by itself. fn square(self) -> Self { let p = Self::Upper::from(self.deconstruct()); let q = Self::Upper::from(Self::ACCURACY); - Self::from_rational_approximation(p * p, q * q) + Self::from_rational_approximation::(p * p, q * q) } /// Multiplication that always rounds down to a whole number. 
The standard `Mul` rounds to the @@ -93,8 +96,10 @@ pub trait PerThing: /// # } /// ``` fn mul_floor(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Unsigned + where + N: Clone + UniqueSaturatedInto + ops::Rem + + ops::Div + ops::Mul + ops::Add + Unsigned, + Self::Inner: Into, { overflow_prune_mul::(b, self.deconstruct(), Rounding::Down) } @@ -115,8 +120,10 @@ pub trait PerThing: /// # } /// ``` fn mul_ceil(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Unsigned + where + N: Clone + UniqueSaturatedInto + ops::Rem + + ops::Div + ops::Mul + ops::Add + Unsigned, + Self::Inner: Into { overflow_prune_mul::(b, self.deconstruct(), Rounding::Up) } @@ -131,9 +138,11 @@ pub trait PerThing: /// # } /// ``` fn saturating_reciprocal_mul(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + - Unsigned + where + N: Clone + UniqueSaturatedInto + ops::Rem + + ops::Div + ops::Mul + ops::Add + Saturating + + Unsigned, + Self::Inner: Into, { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Nearest) } @@ -151,9 +160,11 @@ pub trait PerThing: /// # } /// ``` fn saturating_reciprocal_mul_floor(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + - Unsigned + where + N: Clone + UniqueSaturatedInto + ops::Rem + + ops::Div + ops::Mul + ops::Add + Saturating + + Unsigned, + Self::Inner: Into, { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Down) } @@ -171,9 +182,11 @@ pub trait PerThing: /// # } /// ``` fn saturating_reciprocal_mul_ceil(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + - Unsigned + where + N: Clone + UniqueSaturatedInto + ops::Rem + + ops::Div + ops::Mul + ops::Add + Saturating + + Unsigned, + Self::Inner: Into, { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Up) } @@ -201,14 +214,16 @@ pub trait PerThing: /// # fn main () { /// // 989/100 is technically closer to 99%. /// assert_eq!( - /// Percent::from_rational_approximation(989u64, 1000), - /// Percent::from_parts(98), - /// ); + /// Percent::from_rational_approximation(989u64, 1000), + /// Percent::from_parts(98), + /// ); /// # } /// ``` fn from_rational_approximation(p: N, q: N) -> Self - where N: Clone + Ord + From + TryInto + TryInto + - ops::Div + ops::Rem + ops::Add + Unsigned; + where + N: Clone + Ord + TryInto + TryInto + + ops::Div + ops::Rem + ops::Add + Unsigned, + Self::Inner: Into; } /// The rounding method to use. @@ -223,15 +238,12 @@ enum Rounding { /// Saturating reciprocal multiplication. Compute `x / self`, saturating at the numeric /// bounds instead of overflowing. -fn saturating_reciprocal_mul( - x: N, - part: P::Inner, - rounding: Rounding, -) -> N +fn saturating_reciprocal_mul(x: N, part: P::Inner, rounding: Rounding) -> N where - N: Clone + From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Div + ops::Mul + ops::Add + ops::Rem + Saturating + Unsigned, P: PerThing, + P::Inner: Into, { let maximum: N = P::ACCURACY.into(); let c = rational_mul_correction::( @@ -244,15 +256,12 @@ where } /// Overflow-prune multiplication. Accurately multiply a value by `self` without overflowing. 
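As a quick illustration of the rounding behaviour discussed above, a sketch assuming the usual `sp_arithmetic` re-exports (`Percent`, `Perbill`, `PerThing`); the values are picked for clarity:

```rust
use sp_arithmetic::{PerThing, Perbill, Percent};

fn main() {
    // `from_percent` is now routed through `from_rational_approximation`.
    assert_eq!(<Percent as PerThing>::from_percent(50), Percent::from_parts(50));
    // 989/1000 is closer to 99%, but the approximation rounds towards 98%.
    assert_eq!(Percent::from_rational_approximation(989u64, 1000), Percent::from_parts(98));
    // The caller chooses how the final division rounds.
    assert_eq!(Perbill::from_percent(33).mul_floor(10u64), 3);
    assert_eq!(Perbill::from_percent(33).mul_ceil(10u64), 4);
}
```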
-fn overflow_prune_mul( - x: N, - part: P::Inner, - rounding: Rounding, -) -> N +fn overflow_prune_mul(x: N, part: P::Inner, rounding: Rounding) -> N where - N: Clone + From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Div + ops::Mul + ops::Add + ops::Rem + Unsigned, P: PerThing, + P::Inner: Into, { let maximum: N = P::ACCURACY.into(); let part_n: N = part.into(); @@ -269,19 +278,15 @@ where /// /// Take the remainder of `x / denom` and multiply by `numer / denom`. The result can be added /// to `x / denom * numer` for an accurate result. -fn rational_mul_correction( - x: N, - numer: P::Inner, - denom: P::Inner, - rounding: Rounding, -) -> N +fn rational_mul_correction(x: N, numer: P::Inner, denom: P::Inner, rounding: Rounding) -> N where - N: From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Div + ops::Mul + ops::Add + ops::Rem + Unsigned, P: PerThing, + P::Inner: Into { let numer_upper = P::Upper::from(numer); - let denom_n = N::from(denom); + let denom_n: N = denom.into(); let denom_upper = P::Upper::from(denom); let rem = x.rem(denom_n); // `rem` is less than `denom`, which fits in `P::Inner`. @@ -333,9 +338,9 @@ macro_rules! implement_per_thing { fn encode_as(&self) -> &Self::As { &self.0 } - fn decode_from(x: Self::As) -> Self { - // Saturates if `x` is more than `$max` internally. - Self::from_parts(x) + fn decode_from(x: Self::As) -> Result { + // Saturates if `x` is more than `$max` internally. + Ok(Self::from_parts(x)) } } @@ -364,14 +369,17 @@ macro_rules! implement_per_thing { } fn from_rational_approximation(p: N, q: N) -> Self - where N: Clone + Ord + From + TryInto + TryInto - + ops::Div + ops::Rem + ops::Add + Unsigned + where + N: Clone + Ord + TryInto + TryInto + + ops::Div + ops::Rem + ops::Add + Unsigned + + Zero + One, + Self::Inner: Into, { let div_ceil = |x: N, f: N| -> N { let mut o = x.clone() / f.clone(); let r = x.rem(f.clone()); - if r > N::from(0) { - o = o + N::from(1); + if r > N::zero() { + o = o + N::one(); } o }; @@ -466,54 +474,66 @@ macro_rules! implement_per_thing { /// See [`PerThing::from_rational_approximation`]. pub fn from_rational_approximation(p: N, q: N) -> Self - where N: Clone + Ord + From<$type> + TryInto<$type> + + where N: Clone + Ord + TryInto<$type> + TryInto<$upper_type> + ops::Div + ops::Rem + - ops::Add + Unsigned + ops::Add + Unsigned, + $type: Into, { ::from_rational_approximation(p, q) } /// See [`PerThing::mul_floor`]. pub fn mul_floor(self, b: N) -> N - where N: Clone + From<$type> + UniqueSaturatedInto<$type> + - ops::Rem + ops::Div + ops::Mul + - ops::Add + Unsigned + where + N: Clone + UniqueSaturatedInto<$type> + + ops::Rem + ops::Div + ops::Mul + + ops::Add + Unsigned, + $type: Into, + { PerThing::mul_floor(self, b) } /// See [`PerThing::mul_ceil`]. pub fn mul_ceil(self, b: N) -> N - where N: Clone + From<$type> + UniqueSaturatedInto<$type> + - ops::Rem + ops::Div + ops::Mul + - ops::Add + Unsigned + where + N: Clone + UniqueSaturatedInto<$type> + + ops::Rem + ops::Div + ops::Mul + + ops::Add + Unsigned, + $type: Into, { PerThing::mul_ceil(self, b) } /// See [`PerThing::saturating_reciprocal_mul`]. pub fn saturating_reciprocal_mul(self, b: N) -> N - where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + - ops::Div + ops::Mul + ops::Add + - Saturating + Unsigned + where + N: Clone + UniqueSaturatedInto<$type> + ops::Rem + + ops::Div + ops::Mul + ops::Add + + Saturating + Unsigned, + $type: Into, { PerThing::saturating_reciprocal_mul(self, b) } /// See [`PerThing::saturating_reciprocal_mul_floor`]. 
pub fn saturating_reciprocal_mul_floor(self, b: N) -> N - where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + - ops::Div + ops::Mul + ops::Add + - Saturating + Unsigned + where + N: Clone + UniqueSaturatedInto<$type> + ops::Rem + + ops::Div + ops::Mul + ops::Add + + Saturating + Unsigned, + $type: Into, { PerThing::saturating_reciprocal_mul_floor(self, b) } /// See [`PerThing::saturating_reciprocal_mul_ceil`]. pub fn saturating_reciprocal_mul_ceil(self, b: N) -> N - where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + - ops::Div + ops::Mul + ops::Add + - Saturating + Unsigned + where + N: Clone + UniqueSaturatedInto<$type> + ops::Rem + + ops::Div + ops::Mul + ops::Add + + Saturating + Unsigned, + $type: Into, { PerThing::saturating_reciprocal_mul_ceil(self, b) } @@ -613,8 +633,9 @@ macro_rules! implement_per_thing { /// This is tailored to be used with a balance type. impl ops::Mul for $name where - N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + N: Clone + UniqueSaturatedInto<$type> + ops::Rem + ops::Div + ops::Mul + ops::Add + Unsigned, + $type: Into, { type Output = N; fn mul(self, b: N) -> Self::Output { @@ -707,6 +728,7 @@ macro_rules! implement_per_thing { assert_eq!($name::from_percent(0), $name::from_parts(Zero::zero())); assert_eq!($name::from_percent(10), $name::from_parts($max / 10)); + assert_eq!($name::from_percent(50), $name::from_parts($max / 2)); assert_eq!($name::from_percent(100), $name::from_parts($max)); assert_eq!($name::from_percent(200), $name::from_parts($max)); @@ -717,6 +739,15 @@ macro_rules! implement_per_thing { assert_eq!($name::from_fraction(-1.0), $name::from_parts(Zero::zero())); } + #[test] + fn percent_trait_impl_works() { + assert_eq!(<$name as PerThing>::from_percent(0), $name::from_parts(Zero::zero())); + assert_eq!(<$name as PerThing>::from_percent(10), $name::from_parts($max / 10)); + assert_eq!(<$name as PerThing>::from_percent(50), $name::from_parts($max / 2)); + assert_eq!(<$name as PerThing>::from_percent(100), $name::from_parts($max)); + assert_eq!(<$name as PerThing>::from_percent(200), $name::from_parts($max)); + } + macro_rules! u256ify { ($val:expr) => { Into::::into($val) diff --git a/primitives/arithmetic/src/rational.rs b/primitives/arithmetic/src/rational.rs index 07556bc0e2d71..88eaca1efb6c4 100644 --- a/primitives/arithmetic/src/rational.rs +++ b/primitives/arithmetic/src/rational.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/arithmetic/src/traits.rs b/primitives/arithmetic/src/traits.rs index ce645cfe65d94..ea297077e351c 100644 --- a/primitives/arithmetic/src/traits.rs +++ b/primitives/arithmetic/src/traits.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index ae373f1866ff6..a32b13ca728d8 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-authority-discovery" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] description = "Authority discovery primitives" edition = "2018" @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", default-features = false, version = "1.3.1" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../application-crypto" } +codec = { package = "parity-scale-codec", default-features = false, version = "2.0.0" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-api = { version = "3.0.0", default-features = false, path = "../api" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/authority-discovery/src/lib.rs b/primitives/authority-discovery/src/lib.rs index 0ae47c9758ee6..b04ce43a2c747 100644 --- a/primitives/authority-discovery/src/lib.rs +++ b/primitives/authority-discovery/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index b6f463029077f..5455902fddc34 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-authorship" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] description = "Authorship primitives" edition = "2018" @@ -13,10 +13,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +sp-inherents = { version = "3.0.0", default-features = false, path = "../inherents" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] diff --git a/primitives/authorship/src/lib.rs b/primitives/authorship/src/lib.rs index a760c546a25d7..7bf6769951b27 100644 --- a/primitives/authorship/src/lib.rs +++ b/primitives/authorship/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/block-builder/Cargo.toml b/primitives/block-builder/Cargo.toml index 767307c2a842a..6081e872786ef 100644 --- a/primitives/block-builder/Cargo.toml +++ b/primitives/block-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-block-builder" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } +sp-api = { version = "3.0.0", default-features = false, path = "../api" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +sp-inherents = { version = "3.0.0", default-features = false, path = "../inherents" } [features] default = [ "std" ] diff --git a/primitives/block-builder/src/lib.rs b/primitives/block-builder/src/lib.rs index 6367a18afa615..f51d041c9f1c5 100644 --- a/primitives/block-builder/src/lib.rs +++ b/primitives/block-builder/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index f714aaaa1dae1..092d961162367 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-blockchain" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,12 +15,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.11" -lru = "0.4.0" -parking_lot = "0.10.0" +lru = "0.6.1" +parking_lot = "0.11.1" thiserror = "1.0.21" -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0", path = "../consensus/common" } -sp-runtime = { version = "2.0.0", path = "../runtime" } -sp-block-builder = { version = "2.0.0", path = "../block-builder" } -sp-state-machine = { version = "0.8.0", path = "../state-machine" } -sp-database = { version = "2.0.0", path = "../database" } +futures = "0.3.9" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-consensus = { version = "0.9.0", path = "../consensus/common" } +sp-runtime = { version = "3.0.0", path = "../runtime" } +sp-state-machine = { version = "0.9.0", path = "../state-machine" } +sp-database = { version = "3.0.0", path = "../database" } +sp-api = { version = "3.0.0", path = "../api" } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 1328dfb5752fc..b5efcfb02198a 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -53,7 +53,7 @@ pub trait HeaderBackend: Send + Sync { /// Convert an arbitrary block ID into a block hash. fn block_number_from_id(&self, id: &BlockId) -> Result>> { match *id { - BlockId::Hash(_) => Ok(self.header(*id)?.map(|h| h.number().clone())), + BlockId::Hash(h) => self.number(h), BlockId::Number(n) => Ok(Some(n)), } } @@ -172,7 +172,7 @@ pub trait Backend: HeaderBackend + HeaderMetadata: HeaderBackend + HeaderMetadata: HeaderBackend + HeaderMetadata Result::Extrinsic>>; + + /// Check if extrinsic exists. + fn have_extrinsic(&self, hash: &Block::Hash) -> Result { + Ok(self.extrinsic(hash)?.is_some()) + } } /// Provides access to the optional cache. diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index 6c9ab88fd1b00..6ed5fe1b335f3 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,12 +22,14 @@ use sp_state_machine; use sp_runtime::transaction_validity::TransactionValidityError; use sp_consensus; use codec::Error as CodecError; +use sp_api::ApiError; /// Client Result type alias pub type Result = result::Result; /// Error when the runtime failed to apply an extrinsic. 
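The `have_extrinsic` helper added to the blockchain `Backend` trait above is a provided default built on the existing `extrinsic` lookup, so concrete backends only implement the lookup itself. A minimal, self-contained sketch of that pattern follows; the `ExtrinsicStore` trait, the `MemoryBackend` type and the string error alias are illustrative stand-ins, not part of this change.

use std::collections::HashMap;

// Illustrative stand-ins for Block::Hash, Block::Extrinsic and the sp-blockchain Result alias.
type Hash = [u8; 32];
type Extrinsic = Vec<u8>;
type Result<T> = std::result::Result<T, String>;

trait ExtrinsicStore {
    /// Required: fetch the body of an extrinsic by hash, if it is indexed.
    fn extrinsic(&self, hash: &Hash) -> Result<Option<Extrinsic>>;

    /// Provided default mirroring `Backend::have_extrinsic` above:
    /// existence is simply "the lookup returned `Some`".
    fn have_extrinsic(&self, hash: &Hash) -> Result<bool> {
        Ok(self.extrinsic(hash)?.is_some())
    }
}

struct MemoryBackend {
    extrinsics: HashMap<Hash, Extrinsic>,
}

impl ExtrinsicStore for MemoryBackend {
    fn extrinsic(&self, hash: &Hash) -> Result<Option<Extrinsic>> {
        Ok(self.extrinsics.get(hash).cloned())
    }
}

fn example() {
    let backend = MemoryBackend { extrinsics: HashMap::new() };
    assert_eq!(backend.have_extrinsic(&[0u8; 32]), Ok(false));
}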
#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum ApplyExtrinsicFailed { /// The transaction cannot be included into the current block. /// @@ -35,114 +37,142 @@ pub enum ApplyExtrinsicFailed { /// unappliable onto the current block. #[error("Extrinsic is not valid: {0:?}")] Validity(#[from] TransactionValidityError), - /// This is used for miscellaneous errors that can be represented by string and not handleable. - /// - /// This will become obsolete with complete migration to v4 APIs. - #[error("Extrinsic failed: {0}")] - Msg(String), + + #[error("Application specific error")] + Application(#[source] Box), } /// Substrate Client error #[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +#[non_exhaustive] pub enum Error { - /// Consensus Error + #[error("Cancelled oneshot channel {0}")] + OneShotCancelled(#[from] futures::channel::oneshot::Canceled), + #[error(transparent)] Consensus(#[from] sp_consensus::Error), - /// Backend error. + #[error("Backend error: {0}")] Backend(String), - /// Unknown block. + #[error("UnknownBlock: {0}")] UnknownBlock(String), - /// The `apply_extrinsic` is not valid due to the given `TransactionValidityError`. + #[error(transparent)] ApplyExtrinsicFailed(#[from] ApplyExtrinsicFailed), - /// Execution error. + + #[error("Child type is invalid")] + InvalidChildType, + + #[error("RemoteBodyRequest: invalid extrinsics root expected: {expected} but got {received}")] + ExtrinsicRootInvalid { received: String, expected: String }, + + // `inner` cannot be made member, since it lacks `std::error::Error` trait bounds. #[error("Execution failed: {0:?}")] Execution(Box), - /// Blockchain error. + #[error("Blockchain")] Blockchain(#[source] Box), - /// Invalid authorities set received from the runtime. + + /// A error used by various storage subsystems. + /// + /// Eventually this will be replaced. + #[error("{0}")] + StorageChanges(sp_state_machine::DefaultError), + + #[error("Invalid child storage key")] + InvalidChildStorageKey, + #[error("Current state of blockchain has invalid authorities set")] InvalidAuthoritiesSet, - /// Could not get runtime version. + #[error("Failed to get runtime version: {0}")] VersionInvalid(String), - /// Genesis config is invalid. + #[error("Genesis config provided is invalid")] GenesisInvalid, - /// Error decoding header justification. + #[error("error decoding justification for header")] JustificationDecode, - /// Justification for header is correctly encoded, but invalid. + #[error("bad justification for header: {0}")] BadJustification(String), - /// Not available on light client. + #[error("This method is not currently available when running in light client mode")] NotAvailableOnLightClient, - /// Invalid remote CHT-based proof. + #[error("Remote node has responded with invalid header proof")] InvalidCHTProof, - /// Remote fetch has been cancelled. + #[error("Remote data fetch has been cancelled")] RemoteFetchCancelled, - /// Remote fetch has been failed. + #[error("Remote data fetch has been failed")] RemoteFetchFailed, - /// Error decoding call result. + #[error("Error decoding call result of {0}")] CallResultDecode(&'static str, #[source] CodecError), - /// Error converting a parameter between runtime and node. - #[error("Error converting `{0}` between runtime and node")] - RuntimeParamConversion(String), - /// Changes tries are not supported. 
+ + #[error(transparent)] + RuntimeApiCodecError(#[from] ApiError), + + #[error("Runtime :code missing in storage")] + RuntimeCodeMissing, + #[error("Changes tries are not supported by the runtime")] ChangesTriesNotSupported, - /// Error reading changes tries configuration. + #[error("Error reading changes tries configuration")] ErrorReadingChangesTriesConfig, - /// Key changes query has failed. + #[error("Failed to check changes proof: {0}")] ChangesTrieAccessFailed(String), - /// Last finalized block not parent of current. + #[error("Did not finalize blocks in sequential order.")] NonSequentialFinalization(String), - /// Safety violation: new best block not descendent of last finalized. + #[error("Potential long-range attack: block not in finalized chain.")] NotInFinalizedChain, - /// Hash that is required for building CHT is missing. + #[error("Failed to get hash of block for building CHT")] MissingHashRequiredForCHT, - /// Invalid calculated state root on block import. + #[error("Calculated state root does not match.")] InvalidStateRoot, - /// Incomplete block import pipeline. + #[error("Incomplete block import pipeline.")] IncompletePipeline, + #[error("Transaction pool not ready for block production.")] TransactionPoolNotReady, + #[error("Database")] DatabaseError(#[from] sp_database::error::DatabaseError), - /// A convenience variant for String - #[error("{0}")] - Msg(String), -} -impl<'a> From<&'a str> for Error { - fn from(s: &'a str) -> Self { - Error::Msg(s.into()) - } -} + #[error("Failed to get header for hash {0}")] + MissingHeader(String), -impl From for Error { - fn from(s: String) -> Self { - Error::Msg(s) - } + + #[error("State Database error: {0}")] + StateDatabase(String), + + #[error(transparent)] + Application(#[from] Box), + + // Should be removed/improved once + // the storage `fn`s returns typed errors. + #[error("Runtime code error: {0}")] + RuntimeCode(&'static str), + + // Should be removed/improved once + // the storage `fn`s returns typed errors. + #[error("Storage error: {0}")] + Storage(String), } -impl From> for Error { - fn from(e: Box) -> Self { +impl From> for Error { + fn from(e: Box) -> Self { Self::from_state(e) } } @@ -163,4 +193,11 @@ impl Error { pub fn from_state(e: Box) -> Self { Error::Execution(e) } + + /// Construct from a state db error. + // Can not be done directly, since that would make cargo run out of stack if + // `sc-state-db` is lib is added as dependency. + pub fn from_state_db(e: E) -> Self where E: std::fmt::Debug { + Error::StateDatabase(format!("{:?}", e)) + } } diff --git a/primitives/blockchain/src/header_metadata.rs b/primitives/blockchain/src/header_metadata.rs index b8d9c5c934581..87d0057f32c24 100644 --- a/primitives/blockchain/src/header_metadata.rs +++ b/primitives/blockchain/src/header_metadata.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/blockchain/src/lib.rs b/primitives/blockchain/src/lib.rs index 27b9c3585e9ca..696050f57ac89 100644 --- a/primitives/blockchain/src/lib.rs +++ b/primitives/blockchain/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
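With the blanket `From<&str>`/`From<String>` conversions and the catch-all `Error::Msg` variant removed above, errors are now constructed through named variants or helpers such as the new `from_state_db`. A hedged sketch of that helper's pattern using a simplified stand-in enum; `ClientError` and `PruningError` are illustrative names, not the real types.

use std::fmt::Debug;

// Simplified stand-in for the sp-blockchain error enum.
#[derive(Debug)]
enum ClientError {
    StateDatabase(String),
}

impl ClientError {
    // Mirrors `Error::from_state_db` above: the concrete state-db error type is not
    // a direct dependency here, so it is captured via `Debug` formatting rather
    // than a `From` impl.
    fn from_state_db<E: Debug>(e: E) -> Self {
        ClientError::StateDatabase(format!("{:?}", e))
    }
}

fn example() {
    // Hypothetical state-db error value; any `Debug` type works.
    #[derive(Debug)]
    struct PruningError { block: u64 }

    let err = ClientError::from_state_db(PruningError { block: 42 });
    assert!(matches!(err, ClientError::StateDatabase(_)));
}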
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/chain-spec/Cargo.toml b/primitives/chain-spec/Cargo.toml index a94bd8ad0139e..ec3e731bb0e95 100644 --- a/primitives/chain-spec/Cargo.toml +++ b/primitives/chain-spec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-chain-spec" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/chain-spec/src/lib.rs b/primitives/chain-spec/src/lib.rs index 869fae8236b76..5456718e351d1 100644 --- a/primitives/chain-spec/src/lib.rs +++ b/primitives/chain-spec/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 7ef5f67350baa..100c323024952 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-aura" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" @@ -13,13 +13,14 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0", default-features = false, path = "../../api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../inherents" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../timestamp" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../application-crypto" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +sp-std = { version = "3.0.0", default-features = false, path = "../../std" } +sp-api = { version = "3.0.0", default-features = false, path = "../../api" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../runtime" } +sp-inherents = { version = "3.0.0", default-features = false, path = "../../inherents" } +sp-timestamp = { version = "3.0.0", default-features = false, path = "../../timestamp" } +sp-consensus-slots = { version = "0.9.0", default-features = false, path = "../slots" } [features] default = ["std"] diff --git a/primitives/consensus/aura/src/inherents.rs b/primitives/consensus/aura/src/inherents.rs index a18bd33703061..35f686d93450c 100644 --- a/primitives/consensus/aura/src/inherents.rs +++ b/primitives/consensus/aura/src/inherents.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,7 +26,7 @@ use sp_inherents::{InherentDataProviders, ProvideInherentData}; pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"auraslot"; /// The type of the Aura inherent. 
-pub type InherentType = u64; +pub type InherentType = sp_consensus_slots::Slot; /// Auxiliary trait to extract Aura inherent data. pub trait AuraInherentData { @@ -48,6 +48,7 @@ impl AuraInherentData for InherentData { } /// Provides the slot duration inherent data for `Aura`. +// TODO: Remove in the future. https://github.com/paritytech/substrate/issues/8029 #[cfg(feature = "std")] pub struct InherentDataProvider { slot_duration: u64, @@ -87,8 +88,8 @@ impl ProvideInherentData for InherentDataProvider { use sp_timestamp::TimestampInherentData; let timestamp = inherent_data.timestamp_inherent_data()?; - let slot_num = timestamp / self.slot_duration; - inherent_data.put_data(INHERENT_IDENTIFIER, &slot_num) + let slot = timestamp / self.slot_duration; + inherent_data.put_data(INHERENT_IDENTIFIER, &slot) } fn error_to_string(&self, error: &[u8]) -> Option { diff --git a/primitives/consensus/aura/src/lib.rs b/primitives/consensus/aura/src/lib.rs index cf0bcf2218a06..95630fa7b5c6b 100644 --- a/primitives/consensus/aura/src/lib.rs +++ b/primitives/consensus/aura/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -61,6 +61,8 @@ pub mod ed25519 { pub type AuthorityId = app_ed25519::Public; } +pub use sp_consensus_slots::Slot; + /// The `ConsensusEngineId` of AuRa. pub const AURA_ENGINE_ID: ConsensusEngineId = [b'a', b'u', b'r', b'a']; @@ -71,10 +73,10 @@ pub type AuthorityIndex = u32; #[derive(Decode, Encode)] pub enum ConsensusLog { /// The authorities have changed. - #[codec(index = "1")] + #[codec(index = 1)] AuthoritiesChange(Vec), /// Disable the authority with given index. 
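The inherent type switches from a bare `u64` to `sp_consensus_slots::Slot`. A rough, self-contained sketch of such a slot newtype and of the timestamp-to-slot computation used by the providers above; the real `Slot` type also derives the SCALE codec and further traits, so treat this as an approximation.

use std::ops::Deref;

// Illustrative newtype in the spirit of `sp_consensus_slots::Slot`: a thin wrapper around u64.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Slot(u64);

impl From<u64> for Slot {
    fn from(n: u64) -> Self {
        Slot(n)
    }
}

impl Deref for Slot {
    type Target = u64;
    fn deref(&self) -> &u64 {
        &self.0
    }
}

// As in the inherent data providers: the slot is still the timestamp divided by
// the slot duration, only the result is now a dedicated type.
fn slot_from_timestamp(timestamp_ms: u64, slot_duration_ms: u64) -> Slot {
    Slot::from(timestamp_ms / slot_duration_ms)
}

fn example() {
    let slot = slot_from_timestamp(6_000_000, 6_000);
    // Dereferencing gives back the raw u64, which is how call sites such as
    // `make_transcript` feed the value into `append_u64`.
    assert_eq!(*slot, 1_000);
}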
- #[codec(index = "2")] + #[codec(index = 2)] OnDisabled(AuthorityIndex), } diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 4a22e3f77be4c..fb02014eeef51 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-babe" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Primitives for BABE consensus" edition = "2018" @@ -13,19 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../application-crypto" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } merlin = { version = "2.0", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0", default-features = false, path = "../../api" } -sp-consensus = { version = "0.8.0", optional = true, path = "../common" } -sp-consensus-slots = { version = "0.8.0", default-features = false, path = "../slots" } -sp-consensus-vrf = { version = "0.8.0", path = "../vrf", default-features = false } -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../inherents" } -sp-keystore = { version = "0.8.0", default-features = false, path = "../../keystore", optional = true } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../timestamp" } +sp-std = { version = "3.0.0", default-features = false, path = "../../std" } +sp-api = { version = "3.0.0", default-features = false, path = "../../api" } +sp-consensus = { version = "0.9.0", optional = true, path = "../common" } +sp-consensus-slots = { version = "0.9.0", default-features = false, path = "../slots" } +sp-consensus-vrf = { version = "0.9.0", path = "../vrf", default-features = false } +sp-core = { version = "3.0.0", default-features = false, path = "../../core" } +sp-inherents = { version = "3.0.0", default-features = false, path = "../../inherents" } +sp-keystore = { version = "0.9.0", default-features = false, path = "../../keystore", optional = true } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../runtime" } +sp-timestamp = { version = "3.0.0", default-features = false, path = "../../timestamp" } [features] default = ["std"] diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index f7ae560afff34..5a89e1fbc015f 100644 --- a/primitives/consensus/babe/src/digests.rs +++ b/primitives/consensus/babe/src/digests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
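The `#[codec(index = "N")]` string form changes to the integer form `#[codec(index = N)]` because parity-scale-codec 2.0, the version bumped throughout this diff, only accepts integer literals. A small self-contained sketch of the updated syntax; `DemoConsensusLog` is an illustrative enum, not the real one, and assumes codec 2.0 with the `derive` feature enabled in Cargo.toml.

use parity_scale_codec::{Decode, Encode};

type AuthorityIndex = u32;

// With codec 2.0 the variant index is an integer literal, as in the ConsensusLog
// enums updated above.
#[derive(Encode, Decode, Debug, PartialEq)]
enum DemoConsensusLog {
    #[codec(index = 1)]
    AuthoritiesChange(Vec<u8>),
    #[codec(index = 2)]
    OnDisabled(AuthorityIndex),
}

fn example() {
    // The explicit index is what ends up on the wire, so reordering variants in
    // the source does not silently change the encoding.
    let encoded = DemoConsensusLog::OnDisabled(7).encode();
    assert_eq!(encoded[0], 2);
    let decoded = DemoConsensusLog::decode(&mut &encoded[..]).unwrap();
    assert_eq!(decoded, DemoConsensusLog::OnDisabled(7));
}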
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ use super::{ AllowedSlots, AuthorityId, AuthorityIndex, AuthoritySignature, BabeAuthorityWeight, - BabeEpochConfiguration, SlotNumber, BABE_ENGINE_ID, + BabeEpochConfiguration, Slot, BABE_ENGINE_ID, }; use codec::{Codec, Decode, Encode}; use sp_std::vec::Vec; @@ -32,8 +32,8 @@ use sp_consensus_vrf::schnorrkel::{Randomness, VRFOutput, VRFProof}; pub struct PrimaryPreDigest { /// Authority index pub authority_index: super::AuthorityIndex, - /// Slot number - pub slot_number: SlotNumber, + /// Slot + pub slot: Slot, /// VRF output pub vrf_output: VRFOutput, /// VRF proof @@ -50,8 +50,8 @@ pub struct SecondaryPlainPreDigest { /// it makes things easier for higher-level users of the chain data to /// be aware of the author of a secondary-slot block. pub authority_index: super::AuthorityIndex, - /// Slot number - pub slot_number: SlotNumber, + /// Slot + pub slot: Slot, } /// BABE secondary deterministic slot assignment with VRF outputs. @@ -59,8 +59,8 @@ pub struct SecondaryPlainPreDigest { pub struct SecondaryVRFPreDigest { /// Authority index pub authority_index: super::AuthorityIndex, - /// Slot number - pub slot_number: SlotNumber, + /// Slot + pub slot: Slot, /// VRF output pub vrf_output: VRFOutput, /// VRF proof @@ -73,13 +73,13 @@ pub struct SecondaryVRFPreDigest { #[derive(Clone, RuntimeDebug, Encode, Decode)] pub enum PreDigest { /// A primary VRF-based slot assignment. - #[codec(index = "1")] + #[codec(index = 1)] Primary(PrimaryPreDigest), /// A secondary deterministic slot assignment. - #[codec(index = "2")] + #[codec(index = 2)] SecondaryPlain(SecondaryPlainPreDigest), /// A secondary deterministic slot assignment with VRF outputs. - #[codec(index = "3")] + #[codec(index = 3)] SecondaryVRF(SecondaryVRFPreDigest), } @@ -93,12 +93,12 @@ impl PreDigest { } } - /// Returns the slot number of the pre digest. - pub fn slot_number(&self) -> SlotNumber { + /// Returns the slot of the pre digest. + pub fn slot(&self) -> Slot { match self { - PreDigest::Primary(primary) => primary.slot_number, - PreDigest::SecondaryPlain(secondary) => secondary.slot_number, - PreDigest::SecondaryVRF(secondary) => secondary.slot_number, + PreDigest::Primary(primary) => primary.slot, + PreDigest::SecondaryPlain(secondary) => secondary.slot, + PreDigest::SecondaryVRF(secondary) => secondary.slot, } } @@ -137,7 +137,7 @@ pub struct NextEpochDescriptor { #[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug)] pub enum NextConfigDescriptor { /// Version 1. - #[codec(index = "1")] + #[codec(index = 1)] V1 { /// Value of `c` in `BabeEpochConfiguration`. c: (u64, u64), diff --git a/primitives/consensus/babe/src/inherents.rs b/primitives/consensus/babe/src/inherents.rs index 5384183f9e678..b20cf45cd43ad 100644 --- a/primitives/consensus/babe/src/inherents.rs +++ b/primitives/consensus/babe/src/inherents.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -31,7 +31,7 @@ use sp_std::result::Result; pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"babeslot"; /// The type of the BABE inherent. -pub type InherentType = u64; +pub type InherentType = sp_consensus_slots::Slot; /// Auxiliary trait to extract BABE inherent data. 
pub trait BabeInherentData { /// Get BABE inherent data. @@ -82,8 +82,8 @@ impl ProvideInherentData for InherentDataProvider { fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { let timestamp = inherent_data.timestamp_inherent_data()?; - let slot_number = timestamp / self.slot_duration; - inherent_data.put_data(INHERENT_IDENTIFIER, &slot_number) + let slot = timestamp / self.slot_duration; + inherent_data.put_data(INHERENT_IDENTIFIER, &slot) } fn error_to_string(&self, error: &[u8]) -> Option { diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 74f2659e6e8b2..6987796c114a0 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -76,8 +76,7 @@ pub const MEDIAN_ALGORITHM_CARDINALITY: usize = 1200; // arbitrary suggestion by /// The index of an authority. pub type AuthorityIndex = u32; -/// A slot number. -pub use sp_consensus_slots::SlotNumber; +pub use sp_consensus_slots::Slot; /// An equivocation proof for multiple block authorships on the same slot (i.e. double vote). pub type EquivocationProof = sp_consensus_slots::EquivocationProof; @@ -93,11 +92,11 @@ pub type BabeBlockWeight = u32; /// Make a VRF transcript from given randomness, slot number and epoch. pub fn make_transcript( randomness: &Randomness, - slot_number: u64, + slot: Slot, epoch: u64, ) -> Transcript { let mut transcript = Transcript::new(&BABE_ENGINE_ID); - transcript.append_u64(b"slot number", slot_number); + transcript.append_u64(b"slot number", *slot); transcript.append_u64(b"current epoch", epoch); transcript.append_message(b"chain randomness", &randomness[..]); transcript @@ -107,13 +106,13 @@ pub fn make_transcript( #[cfg(feature = "std")] pub fn make_transcript_data( randomness: &Randomness, - slot_number: u64, + slot: Slot, epoch: u64, ) -> VRFTranscriptData { VRFTranscriptData { label: &BABE_ENGINE_ID, items: vec![ - ("slot number", VRFTranscriptValue::U64(slot_number)), + ("slot number", VRFTranscriptValue::U64(*slot)), ("current epoch", VRFTranscriptValue::U64(epoch)), ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), ] @@ -126,14 +125,14 @@ pub enum ConsensusLog { /// The epoch has changed. This provides information about the _next_ /// epoch - information about the _current_ epoch (i.e. the one we've just /// entered) should already be available earlier in the chain. - #[codec(index = "1")] + #[codec(index = 1)] NextEpochData(NextEpochDescriptor), /// Disable the authority with given index. - #[codec(index = "2")] + #[codec(index = 2)] OnDisabled(AuthorityIndex), /// The epoch has changed, and the epoch after the current one will /// enact different epoch configurations. - #[codec(index = "3")] + #[codec(index = 3)] NextConfigData(NextConfigDescriptor), } @@ -147,7 +146,7 @@ pub struct BabeGenesisConfigurationV1 { pub slot_duration: u64, /// The duration of epochs in slots. - pub epoch_length: SlotNumber, + pub epoch_length: u64, /// A constant value that is used in the threshold calculation formula. /// Expressed as a rational where the first member of the tuple is the @@ -195,7 +194,7 @@ pub struct BabeGenesisConfiguration { pub slot_duration: u64, /// The duration of epochs in slots. 
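Since `make_transcript` above now takes a `Slot`, the raw integer is obtained with `*slot` before it is appended to the transcript. A self-contained sketch of the same transcript construction over plain integers, assuming merlin 2.0 as listed in the sp-consensus-babe dependencies; `DEMO_ENGINE_ID` and `make_demo_transcript` are illustrative stand-ins for `BABE_ENGINE_ID` and the real function.

use merlin::Transcript;

const DEMO_ENGINE_ID: [u8; 4] = *b"BABE";

// Mirrors `make_transcript`; `raw_slot` is the value a caller would obtain via `*slot`.
fn make_demo_transcript(randomness: &[u8; 32], raw_slot: u64, epoch: u64) -> Transcript {
    let mut transcript = Transcript::new(&DEMO_ENGINE_ID);
    transcript.append_u64(b"slot number", raw_slot);
    transcript.append_u64(b"current epoch", epoch);
    transcript.append_message(b"chain randomness", &randomness[..]);
    transcript
}

fn example() {
    let randomness = [0u8; 32];
    let _transcript = make_demo_transcript(&randomness, 1_000, 7);
}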
- pub epoch_length: SlotNumber, + pub epoch_length: u64, /// A constant value that is used in the threshold calculation formula. /// Expressed as a rational where the first member of the tuple is the @@ -303,8 +302,8 @@ where // both headers must be targetting the same slot and it must // be the same as the one in the proof. - if proof.slot_number != first_pre_digest.slot_number() || - first_pre_digest.slot_number() != second_pre_digest.slot_number() + if proof.slot != first_pre_digest.slot() || + first_pre_digest.slot() != second_pre_digest.slot() { return None; } @@ -350,6 +349,21 @@ impl OpaqueKeyOwnershipProof { } } +/// BABE epoch information +#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)] +pub struct Epoch { + /// The epoch index. + pub epoch_index: u64, + /// The starting slot of the epoch. + pub start_slot: Slot, + /// The duration of this epoch. + pub duration: u64, + /// The authorities and their weights. + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + /// Randomness for this epoch. + pub randomness: [u8; VRF_OUTPUT_LENGTH], +} + sp_api::decl_runtime_apis! { /// API necessary for block authorship with BABE. #[api_version(2)] @@ -361,22 +375,29 @@ sp_api::decl_runtime_apis! { #[changed_in(2)] fn configuration() -> BabeGenesisConfigurationV1; - /// Returns the slot number that started the current epoch. - fn current_epoch_start() -> SlotNumber; + /// Returns the slot that started the current epoch. + fn current_epoch_start() -> Slot; + + /// Returns information regarding the current epoch. + fn current_epoch() -> Epoch; + + /// Returns information regarding the next epoch (which was already + /// previously announced). + fn next_epoch() -> Epoch; /// Generates a proof of key ownership for the given authority in the /// current epoch. An example usage of this module is coupled with the /// session historical module to prove that a given authority key is /// tied to a given staking identity during a specific session. Proofs /// of key ownership are necessary for submitting equivocation reports. - /// NOTE: even though the API takes a `slot_number` as parameter the current + /// NOTE: even though the API takes a `slot` as parameter the current /// implementations ignores this parameter and instead relies on this /// method being called at the correct block height, i.e. any point at /// which the epoch for the given slot is live on-chain. Future /// implementations will instead use indexed data through an offchain /// worker, not requiring older states to be available. 
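The new `Epoch` struct and the `current_epoch`/`next_epoch` runtime API methods expose full epoch information instead of only the current epoch's start slot. A small illustrative sketch of how those fields relate; `DemoEpoch` and its `end_slot` helper are hypothetical, use plain u64 in place of `Slot`, and omit the authorities and randomness fields.

// Illustrative mirror of the new `Epoch` type.
struct DemoEpoch {
    epoch_index: u64,
    start_slot: u64,
    duration: u64,
}

impl DemoEpoch {
    // Hypothetical helper: the first slot past this epoch, i.e. roughly where the
    // epoch returned by `next_epoch` would begin.
    fn end_slot(&self) -> u64 {
        self.start_slot + self.duration
    }
}

fn example() {
    let epoch = DemoEpoch { epoch_index: 42, start_slot: 10_000, duration: 2_400 };
    assert_eq!(epoch.end_slot(), 12_400);
    assert_eq!(epoch.epoch_index, 42);
}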
fn generate_key_ownership_proof( - slot_number: SlotNumber, + slot: Slot, authority_id: AuthorityId, ) -> Option; diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index db85244dcfa83..44202678990f8 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,27 +16,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] thiserror = "1.0.21" -libp2p = { version = "0.29.1", default-features = false } +libp2p = { version = "0.34.0", default-features = false } log = "0.4.8" -sp-core = { path= "../../core", version = "2.0.0"} -sp-inherents = { version = "2.0.0", path = "../../inherents" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } +sp-core = { path= "../../core", version = "3.0.0"} +sp-inherents = { version = "3.0.0", path = "../../inherents" } +sp-state-machine = { version = "0.9.0", path = "../../state-machine" } futures = { version = "0.3.1", features = ["thread-pool"] } futures-timer = "3.0.1" -sp-std = { version = "2.0.0", path = "../../std" } -sp-version = { version = "2.0.0", path = "../../version" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } -sp-utils = { version = "2.0.0", path = "../../utils" } -sp-trie = { version = "2.0.0", path = "../../trie" } -sp-api = { version = "2.0.0", path = "../../api" } -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } -parking_lot = "0.10.0" +sp-std = { version = "3.0.0", path = "../../std" } +sp-version = { version = "3.0.0", path = "../../version" } +sp-runtime = { version = "3.0.0", path = "../../runtime" } +sp-utils = { version = "3.0.0", path = "../../utils" } +sp-trie = { version = "3.0.0", path = "../../trie" } +sp-api = { version = "3.0.0", path = "../../api" } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} -wasm-timer = "0.2.4" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} +wasm-timer = "0.2.5" [dev-dependencies] -futures = "0.3.4" +futures = "0.3.9" sp-test-primitives = { version = "2.0.0", path = "../../test-primitives" } [features] diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index 5e593da1163d7..41b5f391f65ca 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,7 +26,7 @@ use std::sync::Arc; use std::any::Any; use crate::Error; -use crate::import_queue::{Verifier, CacheKeyId}; +use crate::import_queue::CacheKeyId; /// Block import result. #[derive(Debug, PartialEq, Eq)] @@ -54,8 +54,6 @@ pub struct ImportedAux { pub needs_justification: bool, /// Received a bad justification. pub bad_justification: bool, - /// Request a finality proof for the given block. 
- pub needs_finality_proof: bool, /// Whether the block that was imported is the new best block. pub is_new_best: bool, } @@ -63,7 +61,7 @@ pub struct ImportedAux { impl ImportResult { /// Returns default value for `ImportResult::Imported` with /// `clear_justification_requests`, `needs_justification`, - /// `bad_justification` and `needs_finality_proof` set to false. + /// `bad_justification` set to false. pub fn imported(is_new_best: bool) -> ImportResult { let mut aux = ImportedAux::default(); aux.is_new_best = is_new_best; @@ -345,21 +343,3 @@ pub trait JustificationImport { justification: Justification, ) -> Result<(), Self::Error>; } - -/// Finality proof import trait. -pub trait FinalityProofImport { - type Error: std::error::Error + Send + 'static; - - /// Called by the import queue when it is started. Returns a list of finality proofs to request - /// from the network. - fn on_start(&mut self) -> Vec<(B::Hash, NumberFor)> { Vec::new() } - - /// Import a Block justification and finalize the given block. Returns finalized block or error. - fn import_finality_proof( - &mut self, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(B::Hash, NumberFor), Self::Error>; -} diff --git a/primitives/consensus/common/src/block_validation.rs b/primitives/consensus/common/src/block_validation.rs index f8255130e6416..fb0846fe9901a 100644 --- a/primitives/consensus/common/src/block_validation.rs +++ b/primitives/consensus/common/src/block_validation.rs @@ -1,18 +1,19 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// http://www.apache.org/licenses/LICENSE-2.0 // -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Block announcement validation. @@ -42,7 +43,12 @@ pub enum Validation { is_new_best: bool, }, /// Invalid block announcement. - Failure, + Failure { + /// Should we disconnect from this peer? + /// + /// This should be used if the peer for example send junk to spam us. + disconnect: bool, + }, } /// Type which checks incoming block announcements. @@ -53,6 +59,10 @@ pub trait BlockAnnounceValidator { /// /// Returning [`Validation::Failure`] will lead to a decrease of the /// peers reputation as it sent us invalid data. 
+ /// + /// The returned future should only resolve to an error iff there was an internal error validating + /// the block announcement. If the block announcement itself is invalid, this should *always* + /// return [`Validation::Failure`]. fn validate( &mut self, header: &B::Header, @@ -68,8 +78,20 @@ impl BlockAnnounceValidator for DefaultBlockAnnounceValidator { fn validate( &mut self, _: &B::Header, - _: &[u8], + data: &[u8], ) -> Pin>> + Send>> { - async { Ok(Validation::Success { is_new_best: false }) }.boxed() + let is_empty = data.is_empty(); + + async move { + if !is_empty { + log::debug!( + target: "sync", + "Received unknown data alongside the block announcement.", + ); + Ok(Validation::Failure { disconnect: true }) + } else { + Ok(Validation::Success { is_new_best: false }) + } + }.boxed() } } diff --git a/primitives/consensus/common/src/error.rs b/primitives/consensus/common/src/error.rs index a21bcf6cca9b2..d7461fe92032e 100644 --- a/primitives/consensus/common/src/error.rs +++ b/primitives/consensus/common/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,6 +25,7 @@ pub type Result = std::result::Result; /// Error type. #[derive(Debug, thiserror::Error)] +#[non_exhaustive] pub enum Error { /// Missing state at block with given descriptor. #[error("State unavailable at block {0}")] diff --git a/primitives/consensus/common/src/evaluation.rs b/primitives/consensus/common/src/evaluation.rs index edb148cdaa991..be930fa4a0016 100644 --- a/primitives/consensus/common/src/evaluation.rs +++ b/primitives/consensus/common/src/evaluation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,6 @@ //! Block evaluation and evaluation errors. -use super::MAX_BLOCK_SIZE; - use codec::Encode; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, One, CheckedConversion}; @@ -42,11 +40,8 @@ pub enum Error { #[error("Proposal had wrong number. Expected {expected:?}, got {got:?}")] WrongNumber { expected: BlockNumber, got: BlockNumber }, /// Proposal exceeded the maximum size. 
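`Validation::Failure` now carries a `disconnect` flag, and the default validator above uses it to reject announcements that carry unexpected extra data. A self-contained sketch of the same shape, assuming the futures crate; `RejectExtraData` and the simplified `Validation` enum are illustrative, since the real trait is generic over the block type and takes the header as well.

use futures::FutureExt;
use std::{error::Error, future::Future, pin::Pin};

// Simplified mirror of the updated `Validation` type: a failed validation now says
// whether the peer should be disconnected.
#[derive(Debug, PartialEq)]
enum Validation {
    Success { is_new_best: bool },
    Failure { disconnect: bool },
}

// Hypothetical validator in the spirit of `DefaultBlockAnnounceValidator`; it only
// inspects the opaque announcement data.
struct RejectExtraData;

impl RejectExtraData {
    fn validate(
        &mut self,
        data: &[u8],
    ) -> Pin<Box<dyn Future<Output = Result<Validation, Box<dyn Error + Send>>> + Send>> {
        let is_empty = data.is_empty();
        async move {
            if !is_empty {
                // Junk alongside the announcement: fail and ask for a disconnect,
                // as the new default validator does.
                Ok(Validation::Failure { disconnect: true })
            } else {
                Ok(Validation::Success { is_new_best: false })
            }
        }
        .boxed()
    }
}

fn example() {
    let mut validator = RejectExtraData;
    let verdict = futures::executor::block_on(validator.validate(b"junk")).unwrap();
    assert_eq!(verdict, Validation::Failure { disconnect: true });
}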
- #[error( - "Proposal exceeded the maximum size of {} by {} bytes.", - MAX_BLOCK_SIZE, .0.saturating_sub(MAX_BLOCK_SIZE) - )] - ProposalTooLarge(usize), + #[error("Proposal size {block_size} exceeds maximum allowed size of {max_block_size}.")] + ProposalTooLarge { block_size: usize, max_block_size: usize }, } /// Attempt to evaluate a substrate block as a node block, returning error @@ -55,28 +50,29 @@ pub fn evaluate_initial( proposal: &Block, parent_hash: &::Hash, parent_number: <::Header as HeaderT>::Number, + max_block_size: usize, ) -> Result<()> { let encoded = Encode::encode(proposal); let proposal = Block::decode(&mut &encoded[..]) .map_err(|e| Error::BadProposalFormat(e))?; - if encoded.len() > MAX_BLOCK_SIZE { - return Err(Error::ProposalTooLarge(encoded.len())) + if encoded.len() > max_block_size { + return Err(Error::ProposalTooLarge { max_block_size, block_size: encoded.len() }) } if *parent_hash != *proposal.header().parent_hash() { return Err(Error::WrongParentHash { expected: format!("{:?}", *parent_hash), got: format!("{:?}", proposal.header().parent_hash()) - }); + }) } if parent_number + One::one() != *proposal.header().number() { return Err(Error::WrongNumber { expected: parent_number.checked_into::().map(|x| x + 1), got: (*proposal.header().number()).checked_into::(), - }); + }) } Ok(()) diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index 92bd9966d75ec..83f6271941fab 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,7 +34,7 @@ use crate::{ error::Error as ConsensusError, block_import::{ BlockImport, BlockOrigin, BlockImportParams, ImportedAux, JustificationImport, ImportResult, - BlockCheckParams, FinalityProofImport, + BlockCheckParams, }, metrics::Metrics, }; @@ -56,11 +56,6 @@ pub type BoxBlockImport = Box< /// Shared justification import struct used by the queue. pub type BoxJustificationImport = Box + Send + Sync>; -/// Shared finality proof import struct used by the queue. -pub type BoxFinalityProofImport = Box< - dyn FinalityProofImport + Send + Sync ->; - /// Maps to the Origin used by the network. pub type Origin = libp2p::PeerId; @@ -115,15 +110,6 @@ pub trait ImportQueue: Send { number: NumberFor, justification: Justification ); - /// Import block finality proof. - fn import_finality_proof( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec - ); - /// Polls for actions to perform on the network. /// /// This method should behave in a way similar to `Future::poll`. It can register the current @@ -146,26 +132,13 @@ pub trait Link: Send { fn justification_imported(&mut self, _who: Origin, _hash: &B::Hash, _number: NumberFor, _success: bool) {} /// Request a justification for the given block. fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor) {} - /// Finality proof import result. - /// - /// Even though we have asked for finality proof of block A, provider could return proof of - /// some earlier block B, if the proof for A was too large. The sync module should continue - /// asking for proof of A in this case. 
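`evaluate_initial` now takes the maximum block size as a parameter and reports both sizes in `ProposalTooLarge`, instead of reading a crate-level `MAX_BLOCK_SIZE`. A minimal sketch of that check in isolation; the function and error names below are illustrative, and the 5 MiB limit in the example is an arbitrary value chosen by the caller.

#[derive(Debug, PartialEq)]
enum EvalError {
    ProposalTooLarge { block_size: usize, max_block_size: usize },
}

// The limit is supplied by the caller rather than baked into the crate.
fn check_proposal_size(encoded: &[u8], max_block_size: usize) -> Result<(), EvalError> {
    if encoded.len() > max_block_size {
        return Err(EvalError::ProposalTooLarge {
            block_size: encoded.len(),
            max_block_size,
        })
    }
    Ok(())
}

fn example() {
    let max = 5 * 1024 * 1024;
    assert_eq!(check_proposal_size(&[0u8; 16], max), Ok(()));
    assert!(check_proposal_size(&vec![0u8; max + 1], max).is_err());
}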
- fn finality_proof_imported( - &mut self, - _who: Origin, - _request_block: (B::Hash, NumberFor), - _finalization_result: Result<(B::Hash, NumberFor), ()>, - ) {} - /// Request a finality proof for the given block. - fn request_finality_proof(&mut self, _hash: &B::Hash, _number: NumberFor) {} } /// Block import successful result. #[derive(Debug, PartialEq)] -pub enum BlockImportResult { +pub enum BlockImportResult { /// Imported known block. - ImportedKnown(N), + ImportedKnown(N, Option), /// Imported unknown block. ImportedUnknown(N, ImportedAux, Option), } @@ -231,7 +204,7 @@ pub(crate) fn import_single_block_metered, Transaction match import { Ok(ImportResult::AlreadyInChain) => { trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); - Ok(BlockImportResult::ImportedKnown(number)) + Ok(BlockImportResult::ImportedKnown(number, peer.clone())) }, Ok(ImportResult::Imported(aux)) => Ok(BlockImportResult::ImportedUnknown(number, aux, peer.clone())), Ok(ImportResult::MissingState) => { diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs index ea0ca2cf3ee88..541c1ff0f4ed7 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,17 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::{mem, pin::Pin, time::Duration, marker::PhantomData}; +use std::{pin::Pin, time::Duration, marker::PhantomData}; use futures::{prelude::*, task::Context, task::Poll}; use futures_timer::Delay; use sp_runtime::{Justification, traits::{Block as BlockT, Header as HeaderT, NumberFor}}; -use sp_utils::mpsc::{TracingUnboundedSender, tracing_unbounded}; +use sp_utils::mpsc::{TracingUnboundedSender, tracing_unbounded, TracingUnboundedReceiver}; use prometheus_endpoint::Registry; use crate::{ block_import::BlockOrigin, import_queue::{ - BlockImportResult, BlockImportError, Verifier, BoxBlockImport, BoxFinalityProofImport, + BlockImportResult, BlockImportError, Verifier, BoxBlockImport, BoxJustificationImport, ImportQueue, Link, Origin, IncomingBlock, import_single_block_metered, buffered_link::{self, BufferedLinkSender, BufferedLinkReceiver}, @@ -36,8 +36,8 @@ use crate::{ /// Interface to a basic block import queue that is importing blocks sequentially in a separate /// task, with plugable verification. pub struct BasicQueue { - /// Channel to send finality work messages to the background task. - finality_sender: TracingUnboundedSender>, + /// Channel to send justifcation import messages to the background task. + justification_sender: TracingUnboundedSender>, /// Channel to send block import messages to the background task. block_import_sender: TracingUnboundedSender>, /// Results coming from the worker task. @@ -48,7 +48,7 @@ pub struct BasicQueue { impl Drop for BasicQueue { fn drop(&mut self) { // Flush the queue and close the receiver to terminate the future. 
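`BlockImportResult::ImportedKnown` now carries the originating peer, so importing an already-known block still tells the caller who sent it. A hedged, simplified sketch of consuming that shape; `PeerId` is just a placeholder string here, the aux data of `ImportedUnknown` is omitted, and the `report` function is hypothetical.

type BlockNumber = u64;
type PeerId = String;

// Simplified mirror of the updated result type.
#[derive(Debug)]
enum BlockImportResult {
    ImportedKnown(BlockNumber, Option<PeerId>),
    ImportedUnknown(BlockNumber, Option<PeerId>),
}

// Hypothetical consumer: the peer is available even for known blocks.
fn report(result: &BlockImportResult) {
    match result {
        BlockImportResult::ImportedKnown(number, Some(peer)) =>
            println!("block #{} already known, announced by {}", number, peer),
        BlockImportResult::ImportedKnown(number, None) =>
            println!("block #{} already known", number),
        BlockImportResult::ImportedUnknown(number, _) =>
            println!("imported new block #{}", number),
    }
}

fn example() {
    report(&BlockImportResult::ImportedKnown(100, Some("peer-1".to_string())));
    report(&BlockImportResult::ImportedUnknown(101, None));
}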
- self.finality_sender.close_channel(); + self.justification_sender.close_channel(); self.block_import_sender.close_channel(); self.result_port.close(); } @@ -57,13 +57,11 @@ impl Drop for BasicQueue { impl BasicQueue { /// Instantiate a new basic queue, with given verifier. /// - /// This creates a background task, and calls `on_start` on the justification importer and - /// finality proof importer. + /// This creates a background task, and calls `on_start` on the justification importer. pub fn new>( verifier: V, block_import: BoxBlockImport, justification_import: Option>, - finality_proof_import: Option>, spawner: &impl sp_core::traits::SpawnNamed, prometheus_registry: Option<&Registry>, ) -> Self { @@ -77,19 +75,18 @@ impl BasicQueue { .ok() }); - let (future, finality_sender, block_import_sender) = BlockImportWorker::new( + let (future, justification_sender, block_import_sender) = BlockImportWorker::new( result_sender, verifier, block_import, justification_import, - finality_proof_import, metrics, ); spawner.spawn_blocking("basic-block-import-worker", future.boxed()); Self { - finality_sender, + justification_sender, block_import_sender, result_port, _phantom: PhantomData, @@ -122,8 +119,8 @@ impl ImportQueue for BasicQueue number: NumberFor, justification: Justification, ) { - let res = self.finality_sender.unbounded_send( - worker_messages::Finality::ImportJustification(who, hash, number, justification), + let res = self.justification_sender.unbounded_send( + worker_messages::ImportJustification(who, hash, number, justification), ); if res.is_err() { @@ -134,26 +131,6 @@ impl ImportQueue for BasicQueue } } - fn import_finality_proof( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec, - ) { - trace!(target: "sync", "Scheduling finality proof of {}/{} for import", number, hash); - let res = self.finality_sender.unbounded_send( - worker_messages::Finality::ImportFinalityProof(who, hash, number, finality_proof), - ); - - if res.is_err() { - log::error!( - target: "sync", - "import_finality_proof: Background import task is no longer alive" - ); - } - } - fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { if self.result_port.poll_actions(cx, link).is_err() { log::error!(target: "sync", "poll_actions: Background import task is no longer alive"); @@ -166,192 +143,121 @@ mod worker_messages { use super::*; pub struct ImportBlocks(pub BlockOrigin, pub Vec>); + pub struct ImportJustification(pub Origin, pub B::Hash, pub NumberFor, pub Justification); +} - pub enum Finality { - ImportJustification(Origin, B::Hash, NumberFor, Justification), - ImportFinalityProof(Origin, B::Hash, NumberFor, Vec), +/// The process of importing blocks. +/// +/// This polls the `block_import_receiver` for new blocks to import and than awaits on importing these blocks. +/// After each block is imported, this async function yields once to give other futures the possibility +/// to be run. +/// +/// Returns when `block_import` ended. 
+async fn block_import_process( + mut block_import: BoxBlockImport, + mut verifier: impl Verifier, + mut result_sender: BufferedLinkSender, + mut block_import_receiver: TracingUnboundedReceiver>, + metrics: Option, + delay_between_blocks: Duration, +) { + loop { + let worker_messages::ImportBlocks(origin, blocks) = match block_import_receiver.next().await { + Some(blocks) => blocks, + None => return, + }; + + let res = import_many_blocks( + &mut block_import, + origin, + blocks, + &mut verifier, + delay_between_blocks, + metrics.clone(), + ).await; + + result_sender.blocks_processed(res.imported, res.block_count, res.results); } } -struct BlockImportWorker { +struct BlockImportWorker { result_sender: BufferedLinkSender, justification_import: Option>, - finality_proof_import: Option>, - delay_between_blocks: Duration, metrics: Option, - _phantom: PhantomData, } -impl BlockImportWorker { - fn new>( +impl BlockImportWorker { + fn new, Transaction: Send>( result_sender: BufferedLinkSender, verifier: V, block_import: BoxBlockImport, justification_import: Option>, - finality_proof_import: Option>, metrics: Option, ) -> ( impl Future + Send, - TracingUnboundedSender>, + TracingUnboundedSender>, TracingUnboundedSender>, ) { use worker_messages::*; - let (finality_sender, mut finality_port) = - tracing_unbounded("mpsc_import_queue_worker_finality"); + let (justification_sender, mut justification_port) = + tracing_unbounded("mpsc_import_queue_worker_justification"); - let (block_import_sender, mut block_import_port) = + let (block_import_sender, block_import_port) = tracing_unbounded("mpsc_import_queue_worker_blocks"); let mut worker = BlockImportWorker { result_sender, justification_import, - finality_proof_import, - delay_between_blocks: Duration::new(0, 0), metrics, - _phantom: PhantomData, }; - // Let's initialize `justification_import` and `finality_proof_import`. + // Let's initialize `justification_import` if let Some(justification_import) = worker.justification_import.as_mut() { for (hash, number) in justification_import.on_start() { worker.result_sender.request_justification(&hash, number); } } - if let Some(finality_proof_import) = worker.finality_proof_import.as_mut() { - for (hash, number) in finality_proof_import.on_start() { - worker.result_sender.request_finality_proof(&hash, number); - } - } - // The future below has two possible states: - // - // - Currently importing many blocks, in which case `importing` is `Some` and contains a - // `Future`, and `block_import` is `None`. - // - Something else, in which case `block_import` is `Some` and `importing` is None. - // - // Additionally, the task will prioritize processing of finality work messages over - // block import messages, hence why two distinct channels are used. - let mut block_import_verifier = Some((block_import, verifier)); - let mut importing = None; - - let future = futures::future::poll_fn(move |cx| { + let delay_between_blocks = Duration::default(); + + let future = async move { + let block_import_process = block_import_process( + block_import, + verifier, + worker.result_sender.clone(), + block_import_port, + worker.metrics.clone(), + delay_between_blocks, + ); + futures::pin_mut!(block_import_process); + loop { // If the results sender is closed, that means that the import queue is shutting // down and we should end this future. if worker.result_sender.is_closed() { - return Poll::Ready(()) + return; } - // Grab the next finality action request sent to the import queue. 
- let finality_work = match Stream::poll_next(Pin::new(&mut finality_port), cx) { - Poll::Ready(Some(msg)) => Some(msg), - Poll::Ready(None) => return Poll::Ready(()), - Poll::Pending => None, - }; - - match finality_work { - Some(Finality::ImportFinalityProof(who, hash, number, proof)) => { - let (_, verif) = block_import_verifier - .as_mut() - .expect("block_import_verifier is always Some; qed"); - - worker.import_finality_proof(verif, who, hash, number, proof); - continue; - } - Some(Finality::ImportJustification(who, hash, number, justification)) => { - worker.import_justification(who, hash, number, justification); - continue; + // Make sure to first process all justifications + while let Poll::Ready(justification) = futures::poll!(justification_port.next()) { + match justification { + Some(ImportJustification(who, hash, number, justification)) => + worker.import_justification(who, hash, number, justification), + None => return, } - None => {} } - // If we are in the process of importing a bunch of blocks, let's resume this - // process before doing anything more. - if let Some(imp_fut) = importing.as_mut() { - match Future::poll(Pin::new(imp_fut), cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready((bi, verif)) => { - block_import_verifier = Some((bi, verif)); - importing = None; - }, - } + if let Poll::Ready(()) = futures::poll!(&mut block_import_process) { + return; } - debug_assert!(importing.is_none()); - debug_assert!(block_import_verifier.is_some()); - - // Grab the next block import request sent to the import queue. - let ImportBlocks(origin, blocks) = - match Stream::poll_next(Pin::new(&mut block_import_port), cx) { - Poll::Ready(Some(msg)) => msg, - Poll::Ready(None) => return Poll::Ready(()), - Poll::Pending => return Poll::Pending, - }; - - // On blocks import request, we merely *start* the process and store - // a `Future` into `importing`. - let (block_import, verifier) = block_import_verifier - .take() - .expect("block_import_verifier is always Some; qed"); - - importing = Some(worker.import_batch(block_import, verifier, origin, blocks)); + // All futures that we polled are now pending. + futures::pending!() } - }); - - (future, finality_sender, block_import_sender) - } - - /// Returns a `Future` that imports the given blocks and sends the results on - /// `self.result_sender`. - /// - /// For lifetime reasons, the `BlockImport` implementation must be passed by value, and is - /// yielded back in the output once the import is finished. 
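The replacement worker loop above drains all ready justification messages first, then gives the long-running block-import future a single poll, and only then suspends. A self-contained sketch of that polling pattern, assuming the futures crate; the channel, message type and names here are illustrative.

use futures::{channel::mpsc, StreamExt};
use std::task::Poll;

async fn worker_loop(
    mut priority_rx: mpsc::UnboundedReceiver<&'static str>,
    main_process: impl std::future::Future<Output = ()>,
) {
    futures::pin_mut!(main_process);
    loop {
        // First process everything that is already queued on the priority channel.
        while let Poll::Ready(msg) = futures::poll!(priority_rx.next()) {
            match msg {
                Some(msg) => println!("handled priority message: {}", msg),
                None => return,
            }
        }

        // Then advance the main process; stop once it has finished.
        if let Poll::Ready(()) = futures::poll!(&mut main_process) {
            return
        }

        // Everything polled above is pending: suspend until a waker fires.
        futures::pending!()
    }
}

fn example() {
    let (tx, rx) = mpsc::unbounded();
    tx.unbounded_send("import justification").unwrap();
    drop(tx); // closing the channel lets the loop terminate
    futures::executor::block_on(worker_loop(rx, futures::future::ready(())));
}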
- fn import_batch>( - &mut self, - block_import: BoxBlockImport, - verifier: V, - origin: BlockOrigin, - blocks: Vec>, - ) -> impl Future, V)> { - let mut result_sender = self.result_sender.clone(); - let metrics = self.metrics.clone(); - - import_many_blocks(block_import, origin, blocks, verifier, self.delay_between_blocks, metrics) - .then(move |(imported, count, results, block_import, verifier)| { - result_sender.blocks_processed(imported, count, results); - future::ready((block_import, verifier)) - }) - } - - fn import_finality_proof>( - &mut self, - verifier: &mut V, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec - ) { - let started = wasm_timer::Instant::now(); - let result = self.finality_proof_import.as_mut().map(|finality_proof_import| { - finality_proof_import.import_finality_proof(hash, number, finality_proof, verifier) - .map_err(|e| { - debug!( - "Finality proof import failed with {:?} for hash: {:?} number: {:?} coming from node: {:?}", - e, - hash, - number, - who, - ); - }) - }).unwrap_or(Err(())); - - if let Some(metrics) = self.metrics.as_ref() { - metrics.finality_proof_import_time.observe(started.elapsed().as_secs_f64()); - } + }; - trace!(target: "sync", "Imported finality proof for {}/{}", number, hash); - self.result_sender.finality_proof_imported(who, (hash, number), result); + (future, justification_sender, block_import_sender) } fn import_justification( @@ -385,29 +291,27 @@ impl BlockImportWorker { } } +/// Result of [`import_many_blocks`]. +struct ImportManyBlocksResult { + /// The number of blocks imported successfully. + imported: usize, + /// The total number of blocks processed. + block_count: usize, + /// The import results for each block. + results: Vec<(Result>, BlockImportError>, B::Hash)>, +} + /// Import several blocks at once, returning import result for each block. /// -/// For lifetime reasons, the `BlockImport` implementation must be passed by value, and is yielded -/// back in the output once the import is finished. -/// -/// The returned `Future` yields at every imported block, which makes the execution more -/// fine-grained and making it possible to interrupt the process. -fn import_many_blocks, Transaction>( - import_handle: BoxBlockImport, +/// This will yield after each imported block once, to ensure that other futures can be called as well. +async fn import_many_blocks, Transaction>( + import_handle: &mut BoxBlockImport, blocks_origin: BlockOrigin, blocks: Vec>, - verifier: V, + verifier: &mut V, delay_between_blocks: Duration, metrics: Option, -) -> impl Future< - Output = ( - usize, - usize, - Vec<(Result>, BlockImportError>, B::Hash)>, - BoxBlockImport, - V, - ), -> { +) -> ImportManyBlocksResult { let count = blocks.len(); let blocks_range = match ( @@ -425,44 +329,18 @@ fn import_many_blocks, Transaction>( let mut results = vec![]; let mut has_error = false; let mut blocks = blocks.into_iter(); - let mut import_handle = Some(import_handle); - let mut waiting = None; - let mut verifier = Some(verifier); // Blocks in the response/drain should be in ascending order. - - future::poll_fn(move |cx| { - // Handle the optional timer that makes us wait before the next import. - if let Some(waiting) = &mut waiting { - match Future::poll(Pin::new(waiting), cx) { - Poll::Ready(_) => {}, - Poll::Pending => return Poll::Pending, - } - } - waiting = None; - + loop { // Is there any block left to import? let block = match blocks.next() { Some(b) => b, None => { // No block left to import, success! 
- let import_handle = import_handle.take() - .expect("Future polled again after it has finished (import handle is None)"); - let verifier = verifier.take() - .expect("Future polled again after it has finished (verifier handle is None)"); - let results = mem::replace(&mut results, Vec::new()); - return Poll::Ready((imported, count, results, import_handle, verifier)); + return ImportManyBlocksResult { block_count: count, imported, results } }, }; - // We extract the content of `import_handle` and `verifier` only when the future ends, - // therefore `import_handle` and `verifier` are always `Some` here. It is illegal to poll - // a `Future` again after it has ended. - let import_handle = import_handle.as_mut() - .expect("Future polled again after it has finished (import handle is None)"); - let verifier = verifier.as_mut() - .expect("Future polled again after it has finished (verifier handle is None)"); - let block_number = block.header.as_ref().map(|h| h.number().clone()); let block_hash = block.hash; let import_result = if has_error { @@ -470,7 +348,7 @@ fn import_many_blocks, Transaction>( } else { // The actual import. import_single_block_metered( - &mut **import_handle, + import_handle, blocks_origin.clone(), block, verifier, @@ -483,7 +361,12 @@ fn import_many_blocks, Transaction>( } if import_result.is_ok() { - trace!(target: "sync", "Block imported successfully {:?} ({})", block_number, block_hash); + trace!( + target: "sync", + "Block imported successfully {:?} ({})", + block_number, + block_hash, + ); imported += 1; } else { has_error = true; @@ -491,14 +374,40 @@ fn import_many_blocks, Transaction>( results.push((import_result, block_hash)); - // Notifies the current task again so that we re-execute this closure again for the next - // block. - if delay_between_blocks != Duration::new(0, 0) { - waiting = Some(Delay::new(delay_between_blocks)); + if delay_between_blocks != Duration::default() && !has_error { + Delay::new(delay_between_blocks).await; + } else { + Yield::new().await + } + } +} + +/// A future that will always `yield` on the first call of `poll` but schedules the current task for +/// re-execution. + +/// +/// This is done by getting the waker and calling `wake_by_ref` followed by returning `Pending`. +/// The next time the `poll` is called, it will return `Ready`. 
+struct Yield(bool); + +impl Yield { + fn new() -> Self { + Self(false) + } +} + +impl Future for Yield { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { + if !self.0 { + self.0 = true; + cx.waker().wake_by_ref(); + Poll::Pending + } else { + Poll::Ready(()) } - cx.waker().wake_by_ref(); - Poll::Pending - }) + } } #[cfg(test)] @@ -595,8 +504,9 @@ mod tests { fn prioritizes_finality_work_over_block_import() { let (result_sender, mut result_port) = buffered_link::buffered_link(); - let (mut worker, mut finality_sender, mut block_import_sender) = - BlockImportWorker::new(result_sender, (), Box::new(()), Some(Box::new(())), None, None); + let (worker, mut finality_sender, mut block_import_sender) = + BlockImportWorker::new(result_sender, (), Box::new(()), Some(Box::new(())), None); + futures::pin_mut!(worker); let mut import_block = |n| { let header = Header { @@ -629,7 +539,7 @@ mod tests { let mut import_justification = || { let hash = Hash::random(); - block_on(finality_sender.send(worker_messages::Finality::ImportJustification( + block_on(finality_sender.send(worker_messages::ImportJustification( libp2p::PeerId::random(), hash, 1, diff --git a/primitives/consensus/common/src/import_queue/buffered_link.rs b/primitives/consensus/common/src/import_queue/buffered_link.rs index a37d4c53c2603..0295f704c4efc 100644 --- a/primitives/consensus/common/src/import_queue/buffered_link.rs +++ b/primitives/consensus/common/src/import_queue/buffered_link.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -81,8 +81,6 @@ enum BlockImportWorkerMsg { BlocksProcessed(usize, usize, Vec<(Result>, BlockImportError>, B::Hash)>), JustificationImported(Origin, B::Hash, NumberFor, bool), RequestJustification(B::Hash, NumberFor), - FinalityProofImported(Origin, (B::Hash, NumberFor), Result<(B::Hash, NumberFor), ()>), - RequestFinalityProof(B::Hash, NumberFor), } impl Link for BufferedLinkSender { @@ -109,20 +107,6 @@ impl Link for BufferedLinkSender { fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { let _ = self.tx.unbounded_send(BlockImportWorkerMsg::RequestJustification(hash.clone(), number)); } - - fn finality_proof_imported( - &mut self, - who: Origin, - request_block: (B::Hash, NumberFor), - finalization_result: Result<(B::Hash, NumberFor), ()>, - ) { - let msg = BlockImportWorkerMsg::FinalityProofImported(who, request_block, finalization_result); - let _ = self.tx.unbounded_send(msg); - } - - fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - let _ = self.tx.unbounded_send(BlockImportWorkerMsg::RequestFinalityProof(hash.clone(), number)); - } } /// See [`buffered_link`]. 
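The `Yield` future introduced above lets `import_many_blocks` hand control back to the executor after every imported block instead of monopolising the task. Here is a small, runnable sketch of the same idea under assumed names: `YieldOnce` mirrors the `Yield` type, while `process_items` is a made-up workload used purely for illustration.

```rust
// Runnable sketch of cooperative yielding between units of work.
// `YieldOnce` mirrors the `Yield` future above; `process_items` is a made-up workload.
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

struct YieldOnce(bool);

impl Future for YieldOnce {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        if !self.0 {
            self.0 = true;
            // Schedule this task to be polled again, but report Pending so the
            // executor can run other ready tasks first.
            cx.waker().wake_by_ref();
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}

async fn process_items(items: Vec<u32>) -> u32 {
    let mut sum = 0;
    for item in items {
        sum += item; // stand-in for one expensive step, e.g. importing a block
        YieldOnce(false).await; // hand control back to the executor between items
    }
    sum
}

fn main() {
    let total = futures::executor::block_on(process_items(vec![1, 2, 3]));
    assert_eq!(total, 6);
}
```

Calling `wake_by_ref` before returning `Pending` ensures the task is rescheduled immediately, so each yield costs one extra poll rather than stalling progress.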
@@ -154,10 +138,6 @@ impl BufferedLinkReceiver { link.justification_imported(who, &hash, number, success), BlockImportWorkerMsg::RequestJustification(hash, number) => link.request_justification(&hash, number), - BlockImportWorkerMsg::FinalityProofImported(who, block, result) => - link.finality_proof_imported(who, block, result), - BlockImportWorkerMsg::RequestFinalityProof(hash, number) => - link.request_finality_proof(&hash, number), } } } diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 47de0674115c2..43edf4f7776c2 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -1,18 +1,19 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate Consensus Common. - -// Substrate Demo is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate Consensus Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate Consensus Common. If not, see . +// This file is part of Substrate. + +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Common utilities for building and using consensus engines in substrate. //! @@ -46,13 +47,10 @@ pub mod import_queue; pub mod evaluation; mod metrics; -// block size limit. -const MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024 + 512; - pub use self::error::Error; pub use block_import::{ BlockImport, BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, BlockCheckParams, - ImportResult, JustificationImport, FinalityProofImport, + ImportResult, JustificationImport, }; pub use select_chain::SelectChain; pub use sp_state_machine::Backend as StateBackend; diff --git a/primitives/consensus/common/src/metrics.rs b/primitives/consensus/common/src/metrics.rs index a35b7c4968f7f..29d39436cbefc 100644 --- a/primitives/consensus/common/src/metrics.rs +++ b/primitives/consensus/common/src/metrics.rs @@ -1,18 +1,19 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Metering tools for consensus @@ -30,7 +31,6 @@ pub(crate) struct Metrics { pub import_queue_processed: CounterVec, pub block_verification_time: HistogramVec, pub block_verification_and_import_time: Histogram, - pub finality_proof_import_time: Histogram, pub justification_import_time: Histogram, } @@ -63,15 +63,6 @@ impl Metrics { )?, registry, )?, - finality_proof_import_time: register( - Histogram::with_opts( - HistogramOpts::new( - "finality_proof_import_time", - "Time taken to import finality proofs", - ), - )?, - registry, - )?, justification_import_time: register( Histogram::with_opts( HistogramOpts::new( diff --git a/primitives/consensus/common/src/offline_tracker.rs b/primitives/consensus/common/src/offline_tracker.rs index b96498041f25d..8e33a2c449e35 100644 --- a/primitives/consensus/common/src/offline_tracker.rs +++ b/primitives/consensus/common/src/offline_tracker.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/consensus/common/src/select_chain.rs b/primitives/consensus/common/src/select_chain.rs index fe0d3972043b9..11f6fbeb54d37 100644 --- a/primitives/consensus/common/src/select_chain.rs +++ b/primitives/consensus/common/src/select_chain.rs @@ -1,18 +1,19 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate Consensus Common. - -// Substrate Demo is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate Consensus Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate Consensus Common. If not, see . +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::error::Error; use sp_runtime::traits::{Block as BlockT, NumberFor}; diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml index cbcea886a7095..850f0efe47ed3 100644 --- a/primitives/consensus/pow/Cargo.toml +++ b/primitives/consensus/pow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-pow" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../../api" } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +sp-api = { version = "3.0.0", default-features = false, path = "../../api" } +sp-std = { version = "3.0.0", default-features = false, path = "../../std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../runtime" } +sp-core = { version = "3.0.0", default-features = false, path = "../../core" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } [features] default = ["std"] diff --git a/primitives/consensus/pow/src/lib.rs b/primitives/consensus/pow/src/lib.rs index 79c9b6f16c3bd..12d3440ea9d54 100644 --- a/primitives/consensus/pow/src/lib.rs +++ b/primitives/consensus/pow/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/consensus/slots/Cargo.toml b/primitives/consensus/slots/Cargo.toml index e605d585b7229..46dbaca1a6ad4 100644 --- a/primitives/consensus/slots/Cargo.toml +++ b/primitives/consensus/slots/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-slots" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Primitives for slots-based consensus" edition = "2018" @@ -13,12 +13,14 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../runtime" } +sp-arithmetic = { version = "3.0.0", default-features = false, path = "../../arithmetic" } [features] default = ["std"] std = [ "codec/std", "sp-runtime/std", + "sp-arithmetic/std", ] diff --git a/primitives/consensus/slots/src/lib.rs b/primitives/consensus/slots/src/lib.rs index f898cf9da6e2a..545d18af1f9be 100644 --- a/primitives/consensus/slots/src/lib.rs +++ b/primitives/consensus/slots/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,8 +21,76 @@ use codec::{Decode, Encode}; -/// A slot number. -pub type SlotNumber = u64; +/// Unit type wrapper that represents a slot. +#[derive(Debug, Encode, Decode, Eq, Clone, Copy, Default, Ord)] +pub struct Slot(u64); + +impl core::ops::Deref for Slot { + type Target = u64; + + fn deref(&self) -> &u64 { + &self.0 + } +} + +impl core::ops::Add for Slot { + type Output = Self; + + fn add(self, other: Self) -> Self { + Self(self.0 + other.0) + } +} + +impl core::ops::Add for Slot { + type Output = Self; + + fn add(self, other: u64) -> Self { + Self(self.0 + other) + } +} + +impl + Copy> core::cmp::PartialEq for Slot { + fn eq(&self, eq: &T) -> bool { + self.0 == (*eq).into() + } +} + +impl + Copy> core::cmp::PartialOrd for Slot { + fn partial_cmp(&self, other: &T) -> Option { + self.0.partial_cmp(&(*other).into()) + } +} + +impl Slot { + /// Saturating addition. + pub fn saturating_add>(self, rhs: T) -> Self { + Self(self.0.saturating_add(rhs.into())) + } + + /// Saturating subtraction. + pub fn saturating_sub>(self, rhs: T) -> Self { + Self(self.0.saturating_sub(rhs.into())) + } +} + +#[cfg(feature = "std")] +impl std::fmt::Display for Slot { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for Slot { + fn from(slot: u64) -> Slot { + Slot(slot) + } +} + +impl From for u64 { + fn from(slot: Slot) -> u64 { + slot.0 + } +} /// Represents an equivocation proof. An equivocation happens when a validator /// produces more than one block on the same slot. The proof of equivocation @@ -32,8 +100,8 @@ pub type SlotNumber = u64; pub struct EquivocationProof { /// Returns the authority id of the equivocator. pub offender: Id, - /// The slot number at which the equivocation happened. - pub slot_number: SlotNumber, + /// The slot at which the equivocation happened. 
+ pub slot: Slot, /// The first header involved in the equivocation. pub first_header: Header, /// The second header involved in the equivocation. diff --git a/primitives/consensus/vrf/Cargo.toml b/primitives/consensus/vrf/Cargo.toml index d0b7d2e2f7aa8..15a9318cd4461 100644 --- a/primitives/consensus/vrf/Cargo.toml +++ b/primitives/consensus/vrf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-vrf" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] description = "Primitives for VRF based consensus" edition = "2018" @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "1.0.0", package = "parity-scale-codec", default-features = false } +codec = { version = "2.0.0", package = "parity-scale-codec", default-features = false } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } -sp-std = { version = "2.0.0", path = "../../std", default-features = false } -sp-core = { version = "2.0.0", path = "../../core", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } +sp-std = { version = "3.0.0", path = "../../std", default-features = false } +sp-core = { version = "3.0.0", path = "../../core", default-features = false } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../runtime" } [features] default = ["std"] diff --git a/primitives/consensus/vrf/src/lib.rs b/primitives/consensus/vrf/src/lib.rs index 430e11974bcd4..19391c6c1c84f 100644 --- a/primitives/consensus/vrf/src/lib.rs +++ b/primitives/consensus/vrf/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/consensus/vrf/src/schnorrkel.rs b/primitives/consensus/vrf/src/schnorrkel.rs index 65e68375865d0..400bdb2f58088 100644 --- a/primitives/consensus/vrf/src/schnorrkel.rs +++ b/primitives/consensus/vrf/src/schnorrkel.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 1757bb4e0d520..3d9cf1287e051 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-core" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,12 +13,12 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } log = { version = "0.4.11", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } byteorder = { version = "1.3.2", default-features = false } -primitive-types = { version = "0.7.0", default-features = false, features = ["codec"] } +primitive-types = { version = "0.9.0", default-features = false, features = ["codec"] } impl-serde = { version = "0.3.0", optional = true } wasmi = { version = "0.6.2", optional = true } hash-db = { version = "0.15.2", default-features = false } @@ -26,17 +26,17 @@ hash256-std-hasher = { version = "0.15.2", default-features = false } base58 = { version = "0.1.0", optional = true } rand = { version = "0.7.3", optional = true, features = ["small_rng"] } substrate-bip39 = { version = "0.4.2", optional = true } -tiny-bip39 = { version = "0.7", optional = true } -regex = { version = "1.3.1", optional = true } +tiny-bip39 = { version = "0.8", optional = true } +regex = { version = "1.4.2", optional = true } num-traits = { version = "0.2.8", default-features = false } -zeroize = { version = "1.0.0", default-features = false } -secrecy = { version = "0.6.0", default-features = false } +zeroize = { version = "1.2.0", default-features = false } +secrecy = { version = "0.7.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } -parking_lot = { version = "0.10.0", optional = true } -sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } -sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } -sp-storage = { version = "2.0.0", default-features = false, path = "../storage" } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } +parking_lot = { version = "0.11.1", optional = true } +sp-debug-derive = { version = "3.0.0", path = "../debug-derive" } +sp-externalities = { version = "0.9.0", optional = true, path = "../externalities" } +sp-storage = { version = "3.0.0", default-features = false, path = "../storage" } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } futures = { version = "0.3.1", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { version = "1.0.21", optional = true } @@ -46,16 +46,16 @@ ed25519-dalek = { version = "1.0.1", default-features = false, features = ["u64_ blake2-rfc = { version = "0.2.18", default-features = false, optional = true } tiny-keccak = { version = "2.0.1", features = ["keccak"], optional = true } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", 
"u64_backend"], default-features = false, optional = true } -sha2 = { version = "0.8.0", default-features = false, optional = true } +sha2 = { version = "0.9.2", default-features = false, optional = true } hex = { version = "0.4", default-features = false, optional = true } twox-hash = { version = "1.5.0", default-features = false, optional = true } libsecp256k1 = { version = "0.3.2", default-features = false, features = ["hmac"], optional = true } merlin = { version = "2.0", default-features = false, optional = true } -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../runtime-interface" } +sp-runtime-interface = { version = "3.0.0", default-features = false, path = "../runtime-interface" } [dev-dependencies] -sp-serializer = { version = "2.0.0", path = "../serializer" } +sp-serializer = { version = "3.0.0", path = "../serializer" } pretty_assertions = "0.6.1" hex-literal = "0.3.1" rand = "0.7.2" @@ -96,11 +96,11 @@ std = [ "base58", "substrate-bip39", "tiny-bip39", - "serde", "byteorder/std", "rand", "sha2/std", "schnorrkel/std", + "schnorrkel/serde", "regex", "num-traits/std", "tiny-keccak", diff --git a/primitives/core/src/changes_trie.rs b/primitives/core/src/changes_trie.rs index 1d88242e43d69..32991ce44a506 100644 --- a/primitives/core/src/changes_trie.rs +++ b/primitives/core/src/changes_trie.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 6606b88887693..2c375f68eb685 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -210,7 +210,7 @@ pub enum PublicError { BadBase58, /// Bad length. BadLength, - /// Unknown version. + /// Unknown identifier for the encoding. UnknownVersion, /// Invalid checksum. InvalidChecksum, @@ -218,11 +218,22 @@ pub enum PublicError { InvalidFormat, /// Invalid derivation path. InvalidPath, + /// Disallowed SS58 Address Format for this datatype. + FormatNotAllowed, } /// Key that can be encoded to/from SS58. +/// +/// See https://github.com/paritytech/substrate/wiki/External-Address-Format-(SS58)#address-type +/// for information on the codec. #[cfg(feature = "full_crypto")] pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { + /// A format filterer, can be used to ensure that `from_ss58check` family only decode for + /// allowed identifiers. By default just refuses the two reserved identifiers. + fn format_is_allowed(f: Ss58AddressFormat) -> bool { + !matches!(f, Ss58AddressFormat::Reserved46 | Ss58AddressFormat::Reserved47) + } + /// Some if the string is a properly encoded SS58Check address. #[cfg(feature = "std")] fn from_ss58check(s: &str) -> Result { @@ -233,25 +244,46 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { _ => Err(PublicError::UnknownVersion), }) } + /// Some if the string is a properly encoded SS58Check address. 
#[cfg(feature = "std")] fn from_ss58check_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { + const CHECKSUM_LEN: usize = 2; let mut res = Self::default(); - let len = res.as_mut().len(); - let d = s.from_base58().map_err(|_| PublicError::BadBase58)?; // failure here would be invalid encoding. - if d.len() != len + 3 { - // Invalid length. - return Err(PublicError::BadLength); - } - let ver = d[0].try_into().map_err(|_: ()| PublicError::UnknownVersion)?; - if d[len + 1..len + 3] != ss58hash(&d[0..len + 1]).as_bytes()[0..2] { + // Must decode to our type. + let body_len = res.as_mut().len(); + + let data = s.from_base58().map_err(|_| PublicError::BadBase58)?; + if data.len() < 2 { return Err(PublicError::BadLength); } + let (prefix_len, ident) = match data[0] { + 0..=63 => (1, data[0] as u16), + 64..=127 => { + // weird bit manipulation owing to the combination of LE encoding and missing two bits + // from the left. + // d[0] d[1] are: 01aaaaaa bbcccccc + // they make the LE-encoded 16-bit value: aaaaaabb 00cccccc + // so the lower byte is formed of aaaaaabb and the higher byte is 00cccccc + let lower = (data[0] << 2) | (data[1] >> 6); + let upper = data[1] & 0b00111111; + (2, (lower as u16) | ((upper as u16) << 8)) + } + _ => Err(PublicError::UnknownVersion)?, + }; + if data.len() != prefix_len + body_len + CHECKSUM_LEN { return Err(PublicError::BadLength) } + let format = ident.try_into().map_err(|_: ()| PublicError::UnknownVersion)?; + if !Self::format_is_allowed(format) { return Err(PublicError::FormatNotAllowed) } + + let hash = ss58hash(&data[0..body_len + prefix_len]); + let checksum = &hash.as_bytes()[0..CHECKSUM_LEN]; + if data[body_len + prefix_len..body_len + prefix_len + CHECKSUM_LEN] != *checksum { // Invalid checksum. return Err(PublicError::InvalidChecksum); } - res.as_mut().copy_from_slice(&d[1..len + 1]); - Ok((res, ver)) + res.as_mut().copy_from_slice(&data[prefix_len..body_len + prefix_len]); + Ok((res, format)) } + /// Some if the string is a properly encoded SS58Check address, optionally with /// a derivation path following. #[cfg(feature = "std")] @@ -267,7 +299,20 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { /// Return the ss58-check string for this key. #[cfg(feature = "std")] fn to_ss58check_with_version(&self, version: Ss58AddressFormat) -> String { - let mut v = vec![version.into()]; + // We mask out the upper two bits of the ident - SS58 Prefix currently only supports 14-bits + let ident: u16 = u16::from(version) & 0b00111111_11111111; + let mut v = match ident { + 0..=63 => vec![ident as u8], + 64..=16_383 => { + // upper six bits of the lower byte(!) + let first = ((ident & 0b00000000_11111100) as u8) >> 2; + // lower two bits of the lower byte in the high pos, + // lower bits of the upper byte in the low pos + let second = ((ident >> 8) as u8) | ((ident & 0b00000000_00000011) as u8) << 6; + vec![first | 0b01000000, second] + } + _ => unreachable!("masked out the upper two bits; qed"), + }; v.extend(self.as_ref()); let r = ss58hash(&v); v.extend(&r.as_bytes()[0..2]); @@ -321,8 +366,8 @@ macro_rules! ss58_address_format { #[derive(Copy, Clone, PartialEq, Eq, crate::RuntimeDebug)] pub enum Ss58AddressFormat { $(#[doc = $desc] $identifier),*, - /// Use a manually provided numeric value. - Custom(u8), + /// Use a manually provided numeric value as a standard identifier + Custom(u16), } #[cfg(feature = "std")] @@ -363,8 +408,16 @@ macro_rules! 
ss58_address_format { } } - impl From for u8 { - fn from(x: Ss58AddressFormat) -> u8 { + impl TryFrom for Ss58AddressFormat { + type Error = (); + + fn try_from(x: u8) -> Result { + Ss58AddressFormat::try_from(x as u16) + } + } + + impl From for u16 { + fn from(x: Ss58AddressFormat) -> u16 { match x { $(Ss58AddressFormat::$identifier => $number),*, Ss58AddressFormat::Custom(n) => n, @@ -372,22 +425,13 @@ macro_rules! ss58_address_format { } } - impl TryFrom for Ss58AddressFormat { + impl TryFrom for Ss58AddressFormat { type Error = (); - fn try_from(x: u8) -> Result { + fn try_from(x: u16) -> Result { match x { $($number => Ok(Ss58AddressFormat::$identifier)),*, - _ => { - #[cfg(feature = "std")] - match Ss58AddressFormat::default() { - Ss58AddressFormat::Custom(n) if n == x => Ok(Ss58AddressFormat::Custom(x)), - _ => Err(()), - } - - #[cfg(not(feature = "std"))] - Err(()) - }, + _ => Ok(Ss58AddressFormat::Custom(x)), } } } @@ -403,7 +447,7 @@ macro_rules! ss58_address_format { fn try_from(x: &'a str) -> Result { match x { $($name => Ok(Ss58AddressFormat::$identifier)),*, - a => a.parse::().map(Ss58AddressFormat::Custom).map_err(|_| ParseError), + a => a.parse::().map(Ss58AddressFormat::Custom).map_err(|_| ParseError), } } } @@ -444,12 +488,12 @@ macro_rules! ss58_address_format { ss58_address_format!( PolkadotAccount => (0, "polkadot", "Polkadot Relay-chain, standard account (*25519).") - Reserved1 => - (1, "reserved1", "Reserved for future use (1).") + BareSr25519 => + (1, "sr25519", "Bare 32-bit Schnorr/Ristretto 25519 (S/R 25519) key.") KusamaAccount => (2, "kusama", "Kusama Relay-chain, standard account (*25519).") - Reserved3 => - (3, "reserved3", "Reserved for future use (3).") + BareEd25519 => + (3, "ed25519", "Bare 32-bit Edwards Ed25519 key.") KatalChainAccount => (4, "katalchain", "Katal Chain, standard account (*25519).") PlasmAccount => @@ -472,6 +516,8 @@ ss58_address_format!( (13, "substratee", "Any SubstraTEE off-chain network private account (*25519).") TotemAccount => (14, "totem", "Any Totem Live Accounting network standard account (*25519).") + SynesthesiaAccount => + (15, "synesthesia", "Synesthesia mainnet, standard account (*25519).") KulupuAccount => (16, "kulupu", "Kulupu mainnet, standard account (*25519).") DarkAccount => @@ -492,28 +538,50 @@ ss58_address_format!( (24, "zero", "ZERO mainnet, standard account (*25519).") AlphavilleAccount => (25, "alphaville", "ZERO testnet, standard account (*25519).") + JupiterAccount => + (26, "jupiter", "Jupiter testnet, standard account (*25519).") + PatractAccount => + (27, "patract", "Patract mainnet, standard account (*25519).") SubsocialAccount => (28, "subsocial", "Subsocial network, standard account (*25519).") + DhiwayAccount => + (29, "cord", "Dhiway CORD network, standard account (*25519).") PhalaAccount => (30, "phala", "Phala Network, standard account (*25519).") + LitentryAccount => + (31, "litentry", "Litentry Network, standard account (*25519).") RobonomicsAccount => (32, "robonomics", "Any Robonomics network standard account (*25519).") DataHighwayAccount => (33, "datahighway", "DataHighway mainnet, standard account (*25519).") + AresAccount => + (34, "ares", "Ares Protocol, standard account (*25519).") + ValiuAccount => + (35, "vln", "Valiu Liquidity Network mainnet, standard account (*25519).") CentrifugeAccount => (36, "centrifuge", "Centrifuge Chain mainnet, standard account (*25519).") NodleAccount => (37, "nodle", "Nodle Chain mainnet, standard account (*25519).") + KiltAccount => + (38, "kilt", "KILT 
Chain mainnet, standard account (*25519).") + PolimecAccount => + (41, "poli", "Polimec Chain mainnet, standard account (*25519).") SubstrateAccount => (42, "substrate", "Any Substrate network, standard account (*25519).") - Reserved43 => - (43, "reserved43", "Reserved for future use (43).") + BareSecp256k1 => + (43, "secp256k1", "Bare ECDSA SECP256k1 key.") ChainXAccount => (44, "chainx", "ChainX mainnet, standard account (*25519).") + UniartsAccount => + (45, "uniarts", "UniArts Chain mainnet, standard account (*25519).") Reserved46 => (46, "reserved46", "Reserved for future use (46).") Reserved47 => (47, "reserved47", "Reserved for future use (47).") + AventusAccount => + (65, "aventus", "Aventus Chain mainnet, standard account (*25519).") + CrustAccount => + (66, "crust", "Crust Network, standard account (*25519).") // Note: 48 and above are reserved. ); @@ -1027,6 +1095,7 @@ pub trait CryptoType { Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode, PassByInner, crate::RuntimeDebug )] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct KeyTypeId(pub [u8; 4]); impl From for KeyTypeId { @@ -1043,6 +1112,7 @@ impl From for u32 { impl<'a> TryFrom<&'a str> for KeyTypeId { type Error = (); + fn try_from(x: &'a str) -> Result { let b = x.as_bytes(); if b.len() != 4 { @@ -1056,10 +1126,12 @@ impl<'a> TryFrom<&'a str> for KeyTypeId { /// An identifier for a specific cryptographic algorithm used by a key pair #[derive(Debug, Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct CryptoTypeId(pub [u8; 4]); /// A type alias of CryptoTypeId & a public key #[derive(Debug, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct CryptoTypePublicPair(pub CryptoTypeId, pub Vec); #[cfg(feature = "std")] diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index a836eb0e4c22f..fc9b16beedd13 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -554,6 +554,7 @@ mod test { use hex_literal::hex; use crate::crypto::{DEV_PHRASE, set_default_ss58_version}; use serde_json; + use crate::crypto::PublicError; #[test] fn default_phrase_should_be_used() { @@ -677,19 +678,65 @@ mod test { } #[test] - fn ss58check_custom_format_works() { + fn ss58check_format_check_works() { + use crate::crypto::Ss58AddressFormat; + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + let format = Ss58AddressFormat::Reserved46; + let s = public.to_ss58check_with_version(format); + assert_eq!(Public::from_ss58check_with_version(&s), Err(PublicError::FormatNotAllowed)); + } + + #[test] + fn ss58check_full_roundtrip_works() { use crate::crypto::Ss58AddressFormat; - // temp save default format version - let default_format = Ss58AddressFormat::default(); - // set current ss58 version is custom "200" `Ss58AddressFormat::Custom(200)` - set_default_ss58_version(Ss58AddressFormat::Custom(200)); - // custom addr encoded by version 200 - let addr = "2X64kMNEWAW5KLZMSKcGKEc96MyuaRsRUku7vomuYxKgqjVCRj"; - Public::from_ss58check(&addr).unwrap(); - set_default_ss58_version(default_format); - // set current ss58 version to default version - let addr = "KWAfgC2aRG5UVD6CpbPQXCx4YZZUhvWqqAJE6qcYc9Rtr6g5C"; - Public::from_ss58check(&addr).unwrap(); + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + let format = Ss58AddressFormat::PolkadotAccount; + let s = public.to_ss58check_with_version(format); + let (k, f) = Public::from_ss58check_with_version(&s).unwrap(); + assert_eq!(k, public); + assert_eq!(f, format); + + let format = Ss58AddressFormat::Custom(64); + let s = public.to_ss58check_with_version(format); + let (k, f) = Public::from_ss58check_with_version(&s).unwrap(); + assert_eq!(k, public); + assert_eq!(f, format); + } + + #[test] + fn ss58check_custom_format_works() { + // We need to run this test in its own process to not interfere with other tests running in + // parallel and also relying on the ss58 version. + if std::env::var("RUN_CUSTOM_FORMAT_TEST") == Ok("1".into()) { + use crate::crypto::Ss58AddressFormat; + // temp save default format version + let default_format = Ss58AddressFormat::default(); + // set current ss58 version is custom "200" `Ss58AddressFormat::Custom(200)` + + set_default_ss58_version(Ss58AddressFormat::Custom(200)); + // custom addr encoded by version 200 + let addr = "4pbsSkWcBaYoFHrKJZp5fDVUKbqSYD9dhZZGvpp3vQ5ysVs5ybV"; + Public::from_ss58check(&addr).unwrap(); + + set_default_ss58_version(default_format); + // set current ss58 version to default version + let addr = "KWAfgC2aRG5UVD6CpbPQXCx4YZZUhvWqqAJE6qcYc9Rtr6g5C"; + Public::from_ss58check(&addr).unwrap(); + + println!("CUSTOM_FORMAT_SUCCESSFUL"); + } else { + let executable = std::env::current_exe().unwrap(); + let output = std::process::Command::new(executable) + .env("RUN_CUSTOM_FORMAT_TEST", "1") + .args(&["--nocapture", "ss58check_custom_format_works"]) + .output() + .unwrap(); + + let output = String::from_utf8(output.stdout).unwrap(); + assert!(output.contains("CUSTOM_FORMAT_SUCCESSFUL")); + } } #[test] diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index ad08f9ab8baef..6589310931200 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/core/src/hash.rs b/primitives/core/src/hash.rs index 20a6788c32070..dcaafd2906de4 100644 --- a/primitives/core/src/hash.rs +++ b/primitives/core/src/hash.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/core/src/hasher.rs b/primitives/core/src/hasher.rs index 8ccaa4d90a78d..13a168c70f93c 100644 --- a/primitives/core/src/hasher.rs +++ b/primitives/core/src/hasher.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/core/src/hashing.rs b/primitives/core/src/hashing.rs index 98dc0c2efc597..0b67d33235ae3 100644 --- a/primitives/core/src/hashing.rs +++ b/primitives/core/src/hashing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,6 +16,11 @@ // limitations under the License. //! Hashing functions. +//! +//! This module is gated by `full-crypto` feature. If you intend to use any of the functions +//! defined here within your runtime, you should most likely rather use [sp_io::hashing] instead, +//! unless you know what you're doing. Using `sp_io` will be more performant, since instead of +//! computing the hash in WASM it delegates that computation to the host client. use blake2_rfc; use sha2::{Digest, Sha256}; @@ -72,7 +77,7 @@ pub fn blake2_64(data: &[u8]) -> [u8; 8] { /// Do a XX 64-bit hash and place result in `dest`. pub fn twox_64_into(data: &[u8], dest: &mut [u8; 8]) { - use ::core::hash::Hasher; + use core::hash::Hasher; let mut h0 = twox_hash::XxHash::with_seed(0); h0.write(data); let r0 = h0.finish(); @@ -89,7 +94,7 @@ pub fn twox_64(data: &[u8]) -> [u8; 8] { /// Do a XX 128-bit hash and place result in `dest`. pub fn twox_128_into(data: &[u8], dest: &mut [u8; 16]) { - use ::core::hash::Hasher; + use core::hash::Hasher; let mut h0 = twox_hash::XxHash::with_seed(0); let mut h1 = twox_hash::XxHash::with_seed(1); h0.write(data); @@ -158,8 +163,8 @@ pub fn keccak_512(data: &[u8]) -> [u8; 64] { /// Do a sha2 256-bit hash and return result. pub fn sha2_256(data: &[u8]) -> [u8; 32] { let mut hasher = Sha256::new(); - hasher.input(data); + hasher.update(data); let mut output = [0u8; 32]; - output.copy_from_slice(&hasher.result()); + output.copy_from_slice(&hasher.finalize()); output } diff --git a/primitives/core/src/hexdisplay.rs b/primitives/core/src/hexdisplay.rs index 9d2b7a12d032e..304b665a72c9b 100644 --- a/primitives/core/src/hexdisplay.rs +++ b/primitives/core/src/hexdisplay.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 7857937aebfd1..7fc9fa0919698 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/core/src/offchain/mod.rs b/primitives/core/src/offchain/mod.rs index 4768496c4a508..ef6c38a7d6fd7 100644 --- a/primitives/core/src/offchain/mod.rs +++ b/primitives/core/src/offchain/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -746,6 +746,14 @@ impl TransactionPoolExt { } } +/// Change to be applied to the offchain worker db in regards to a key. +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub enum OffchainOverlayedChange { + /// Remove the data associated with the key + Remove, + /// Overwrite the value of an associated key + SetValue(Vec), +} #[cfg(test)] mod tests { diff --git a/primitives/core/src/offchain/storage.rs b/primitives/core/src/offchain/storage.rs index 7d7c711ed95f0..f114c102fb82c 100644 --- a/primitives/core/src/offchain/storage.rs +++ b/primitives/core/src/offchain/storage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -41,7 +41,7 @@ impl InMemOffchainStorage { /// Remove a key and its associated value from the offchain database. pub fn remove(&mut self, prefix: &[u8], key: &[u8]) { let key: Vec = prefix.iter().chain(key).cloned().collect(); - let _ = self.storage.remove(&key); + self.storage.remove(&key); } } @@ -83,215 +83,3 @@ impl OffchainStorage for InMemOffchainStorage { } } } - - - - -/// Change to be applied to the offchain worker db in regards to a key. -#[derive(Debug,Clone,Hash,Eq,PartialEq)] -pub enum OffchainOverlayedChange { - /// Remove the data associated with the key - Remove, - /// Overwrite the value of an associated key - SetValue(Vec), -} - -/// In-memory storage for offchain workers recoding changes for the actual offchain storage implementation. -#[derive(Debug, Clone)] -pub enum OffchainOverlayedChanges { - /// Writing overlay changes to the offchain worker database is disabled by configuration. - Disabled, - /// Overlay changes can be recorded using the inner collection of this variant, - /// where the identifier is the tuple of `(prefix, key)`. - Enabled(HashMap<(Vec, Vec), OffchainOverlayedChange>), -} - -impl Default for OffchainOverlayedChanges { - fn default() -> Self { - Self::Disabled - } -} - -impl OffchainOverlayedChanges { - /// Create the disabled variant. - pub fn disabled() -> Self { - Self::Disabled - } - - /// Create the enabled variant. - pub fn enabled() -> Self { - Self::Enabled(HashMap::new()) - } - - /// Consume the offchain storage and iterate over all key value pairs. 
- pub fn into_iter(self) -> OffchainOverlayedChangesIntoIter { - OffchainOverlayedChangesIntoIter::new(self) - } - - /// Iterate over all key value pairs by reference. - pub fn iter<'a>(&'a self) -> OffchainOverlayedChangesIter { - OffchainOverlayedChangesIter::new(&self) - } - - /// Drain all elements of changeset. - pub fn drain<'a, 'd>(&'a mut self) -> OffchainOverlayedChangesDrain<'d> where 'a: 'd { - OffchainOverlayedChangesDrain::new(self) - } - - /// Remove a key and its associated value from the offchain database. - pub fn remove(&mut self, prefix: &[u8], key: &[u8]) { - if let Self::Enabled(ref mut storage) = self { - let _ = storage.insert((prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::Remove); - } - } - - /// Set the value associated with a key under a prefix to the value provided. - pub fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { - if let Self::Enabled(ref mut storage) = self { - let _ = storage.insert((prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::SetValue(value.to_vec())); - } - } - - /// Obtain a associated value to the given key in storage with prefix. - pub fn get(&self, prefix: &[u8], key: &[u8]) -> Option { - if let Self::Enabled(ref storage) = self { - let key = (prefix.to_vec(), key.to_vec()); - storage.get(&key).cloned() - } else { - None - } - } -} - -use std::collections::hash_map; - -/// Iterate by reference over the prepared offchain worker storage changes. -pub struct OffchainOverlayedChangesIter<'i> { - inner: Option, Vec), OffchainOverlayedChange>>, -} - -impl<'i> Iterator for OffchainOverlayedChangesIter<'i> { - type Item = (&'i (Vec, Vec), &'i OffchainOverlayedChange); - fn next(&mut self) -> Option { - if let Some(ref mut iter) = self.inner { - iter.next() - } else { - None - } - } -} - -impl<'i> OffchainOverlayedChangesIter<'i> { - /// Create a new iterator based on a refernce to the parent container. - pub fn new(container: &'i OffchainOverlayedChanges) -> Self { - match container { - OffchainOverlayedChanges::Enabled(inner) => Self { - inner: Some(inner.iter()) - }, - OffchainOverlayedChanges::Disabled => Self { inner: None, }, - } - } -} - - -/// Iterate by value over the prepared offchain worker storage changes. -pub struct OffchainOverlayedChangesIntoIter { - inner: Option,Vec),OffchainOverlayedChange>>, -} - -impl Iterator for OffchainOverlayedChangesIntoIter { - type Item = ((Vec, Vec), OffchainOverlayedChange); - fn next(&mut self) -> Option { - if let Some(ref mut iter) = self.inner { - iter.next() - } else { - None - } - } -} - -impl OffchainOverlayedChangesIntoIter { - /// Create a new iterator by consuming the collection. - pub fn new(container: OffchainOverlayedChanges) -> Self { - match container { - OffchainOverlayedChanges::Enabled(inner) => Self { - inner: Some(inner.into_iter()) - }, - OffchainOverlayedChanges::Disabled => Self { inner: None, }, - } - } -} - -/// Iterate over all items while draining them from the collection. -pub struct OffchainOverlayedChangesDrain<'d> { - inner: Option, Vec), OffchainOverlayedChange>>, -} - -impl<'d> Iterator for OffchainOverlayedChangesDrain<'d> { - type Item = ((Vec, Vec), OffchainOverlayedChange); - fn next(&mut self) -> Option { - if let Some(ref mut iter) = self.inner { - iter.next() - } else { - None - } - } -} - -impl<'d> OffchainOverlayedChangesDrain<'d> { - /// Create a new iterator by taking a mut reference to the collection, - /// for the lifetime of the created drain iterator. 
- pub fn new(container: &'d mut OffchainOverlayedChanges) -> Self { - match container { - OffchainOverlayedChanges::Enabled(ref mut inner) => Self { - inner: Some(inner.drain()) - }, - OffchainOverlayedChanges::Disabled => Self { inner: None, }, - } - } -} - - -#[cfg(test)] -mod test { - use super::*; - use super::super::STORAGE_PREFIX; - - #[test] - fn test_drain() { - let mut ooc = OffchainOverlayedChanges::enabled(); - ooc.set(STORAGE_PREFIX,b"kkk", b"vvv"); - let drained = ooc.drain().count(); - assert_eq!(drained, 1); - let leftover = ooc.iter().count(); - assert_eq!(leftover, 0); - - ooc.set(STORAGE_PREFIX, b"a", b"v"); - ooc.set(STORAGE_PREFIX, b"b", b"v"); - ooc.set(STORAGE_PREFIX, b"c", b"v"); - ooc.set(STORAGE_PREFIX, b"d", b"v"); - ooc.set(STORAGE_PREFIX, b"e", b"v"); - assert_eq!(ooc.iter().count(), 5); - } - - #[test] - fn test_accumulated_set_remove_set() { - let mut ooc = OffchainOverlayedChanges::enabled(); - ooc.set(STORAGE_PREFIX, b"ppp", b"qqq"); - ooc.remove(STORAGE_PREFIX, b"ppp"); - // keys are equiv, so it will overwrite the value and the overlay will contain - // one item - assert_eq!(ooc.iter().count(), 1); - - ooc.set(STORAGE_PREFIX, b"ppp", b"rrr"); - let mut iter = ooc.into_iter(); - assert_eq!( - iter.next(), - Some( - ((STORAGE_PREFIX.to_vec(), b"ppp".to_vec()), - OffchainOverlayedChange::SetValue(b"rrr".to_vec())) - ) - ); - assert_eq!(iter.next(), None); - } -} diff --git a/primitives/core/src/offchain/testing.rs b/primitives/core/src/offchain/testing.rs index 3fe34cc0cfa7b..da486a3d03b16 100644 --- a/primitives/core/src/offchain/testing.rs +++ b/primitives/core/src/offchain/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +27,8 @@ use std::{ use crate::OpaquePeerId; use crate::offchain::{ self, - storage::{InMemOffchainStorage, OffchainOverlayedChange, OffchainOverlayedChanges}, + OffchainOverlayedChange, + storage::InMemOffchainStorage, HttpError, HttpRequestId as RequestId, HttpRequestStatus as RequestStatus, @@ -70,6 +71,8 @@ pub struct TestPersistentOffchainDB { } impl TestPersistentOffchainDB { + const PREFIX: &'static [u8] = b""; + /// Create a new and empty offchain storage db for persistent items pub fn new() -> Self { Self { @@ -78,15 +81,23 @@ impl TestPersistentOffchainDB { } /// Apply a set of off-chain changes directly to the test backend - pub fn apply_offchain_changes(&mut self, changes: &mut OffchainOverlayedChanges) { + pub fn apply_offchain_changes( + &mut self, + changes: impl Iterator, Vec), OffchainOverlayedChange)>, + ) { let mut me = self.persistent.write(); - for ((_prefix, key), value_operation) in changes.drain() { + for ((_prefix, key), value_operation) in changes { match value_operation { - OffchainOverlayedChange::SetValue(val) => me.set(b"", key.as_slice(), val.as_slice()), - OffchainOverlayedChange::Remove => me.remove(b"", key.as_slice()), + OffchainOverlayedChange::SetValue(val) => me.set(Self::PREFIX, key.as_slice(), val.as_slice()), + OffchainOverlayedChange::Remove => me.remove(Self::PREFIX, key.as_slice()), } } } + + /// Retrieve a key from the test backend. 
+ pub fn get(&self, key: &[u8]) -> Option> { + OffchainStorage::get(self, Self::PREFIX, key) + } } impl OffchainStorage for TestPersistentOffchainDB { @@ -266,8 +277,8 @@ impl offchain::Externalities for TestOffchainExt { fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { let state = self.0.read(); match kind { - StorageKind::LOCAL => state.local_storage.get(b"", key), - StorageKind::PERSISTENT => state.persistent_storage.get(b"", key), + StorageKind::LOCAL => state.local_storage.get(TestPersistentOffchainDB::PREFIX, key), + StorageKind::PERSISTENT => state.persistent_storage.get(key), } } diff --git a/primitives/core/src/sandbox.rs b/primitives/core/src/sandbox.rs index 4cb5bd41d5826..a15a7af418313 100644 --- a/primitives/core/src/sandbox.rs +++ b/primitives/core/src/sandbox.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -31,12 +31,12 @@ pub struct HostError; pub enum ExternEntity { /// Function that is specified by an index in a default table of /// a module that creates the sandbox. - #[codec(index = "1")] + #[codec(index = 1)] Function(u32), /// Linear memory that is specified by some identifier returned by sandbox /// module upon creation new sandboxed memory. - #[codec(index = "2")] + #[codec(index = 2)] Memory(u32), } diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index 9a757c8900542..37926d8f801c5 100644 --- a/primitives/core/src/sr25519.rs +++ b/primitives/core/src/sr25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index cee8bec22aa08..1506abb77f9c1 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index 97100ea58f8c7..8488a1873cacf 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/core/src/u32_trait.rs b/primitives/core/src/u32_trait.rs index 6f73e1f6ba719..07f9bb0032832 100644 --- a/primitives/core/src/u32_trait.rs +++ b/primitives/core/src/u32_trait.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/core/src/uint.rs b/primitives/core/src/uint.rs index ef1adc4a0e0ee..f917f472d787b 100644 --- a/primitives/core/src/uint.rs +++ b/primitives/core/src/uint.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index cc3fe7cd1b474..4062ba292352f 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-database" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -11,5 +11,5 @@ documentation = "https://docs.rs/sp-database" readme = "README.md" [dependencies] -parking_lot = "0.10.0" -kvdb = "0.7.0" +parking_lot = "0.11.1" +kvdb = "0.9.0" diff --git a/primitives/database/src/error.rs b/primitives/database/src/error.rs index 3253839bbeff8..4bf5a20aff401 100644 --- a/primitives/database/src/error.rs +++ b/primitives/database/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/database/src/kvdb.rs b/primitives/database/src/kvdb.rs index f436979aaf4c1..b50ca53786f9f 100644 --- a/primitives/database/src/kvdb.rs +++ b/primitives/database/src/kvdb.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +27,7 @@ fn handle_err(result: std::io::Result) -> T { match result { Ok(r) => r, Err(e) => { - panic!("Critical database eror: {:?}", e); + panic!("Critical database error: {:?}", e); } } } diff --git a/primitives/database/src/lib.rs b/primitives/database/src/lib.rs index 1908eb49bb6c6..7107ea25c02c0 100644 --- a/primitives/database/src/lib.rs +++ b/primitives/database/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -115,6 +115,11 @@ pub trait Database: Send + Sync { /// `key` is not currently in the database. fn get(&self, col: ColumnId, key: &[u8]) -> Option>; + /// Check if the value exists in the database without retrieving it. + fn contains(&self, col: ColumnId, key: &[u8]) -> bool { + self.get(col, key).is_some() + } + /// Call `f` with the value previously stored against `key`. /// /// This may be faster than `get` since it doesn't allocate. diff --git a/primitives/database/src/mem.rs b/primitives/database/src/mem.rs index 51cb854334d50..41af2e2f235c0 100644 --- a/primitives/database/src/mem.rs +++ b/primitives/database/src/mem.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
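The `Database` trait above gains a `contains` method with a default implementation in terms of `get`. The sketch below shows the same pattern on a simplified stand-in trait and how a concrete backend might override the default to avoid cloning the value; the trait shape and `MemDb` are illustrative only, not the `sp-database` API:

```rust
// Simplified stand-in for the `Database` trait in `sp-database`, for illustration only.
use std::collections::HashMap;

type ColumnId = u32;

trait Database: Send + Sync {
    /// Retrieve the value previously stored against `key`, or `None`.
    fn get(&self, col: ColumnId, key: &[u8]) -> Option<Vec<u8>>;

    /// Check if the value exists without retrieving it.
    /// The default implementation simply calls `get` and drops the value.
    fn contains(&self, col: ColumnId, key: &[u8]) -> bool {
        self.get(col, key).is_some()
    }
}

struct MemDb(HashMap<(ColumnId, Vec<u8>), Vec<u8>>);

impl Database for MemDb {
    fn get(&self, col: ColumnId, key: &[u8]) -> Option<Vec<u8>> {
        self.0.get(&(col, key.to_vec())).cloned()
    }

    // Override: answer the existence query without cloning the stored value.
    fn contains(&self, col: ColumnId, key: &[u8]) -> bool {
        self.0.contains_key(&(col, key.to_vec()))
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert((0, b"key".to_vec()), b"value".to_vec());
    let db = MemDb(map);
    assert!(db.contains(0, b"key"));
    assert!(!db.contains(0, b"missing"));
}
```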
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index 10164553f857c..0d3ba805100c4 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-debug-derive" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -18,7 +18,7 @@ proc-macro = true [dependencies] quote = "1.0.3" -syn = "1.0.7" +syn = "1.0.58" proc-macro2 = "1.0" [features] diff --git a/primitives/debug-derive/src/impls.rs b/primitives/debug-derive/src/impls.rs index 1757b294d9d49..898e4eef5d06b 100644 --- a/primitives/debug-derive/src/impls.rs +++ b/primitives/debug-derive/src/impls.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/debug-derive/src/lib.rs b/primitives/debug-derive/src/lib.rs index db370f890810d..ebfbd614d9c8d 100644 --- a/primitives/debug-derive/src/lib.rs +++ b/primitives/debug-derive/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,9 +27,9 @@ //! //! ```rust //! #[derive(sp_debug_derive::RuntimeDebug)] -//! struct MyStruct; +//! struct MyStruct; //! -//! assert_eq!(format!("{:?}", MyStruct), "MyStruct"); +//! assert_eq!(format!("{:?}", MyStruct), "MyStruct"); //! ``` mod impls; diff --git a/primitives/debug-derive/tests/tests.rs b/primitives/debug-derive/tests/tests.rs index 6a03762b1c655..d51d6a05bf21c 100644 --- a/primitives/debug-derive/tests/tests.rs +++ b/primitives/debug-derive/tests/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/election-providers/Cargo.toml b/primitives/election-providers/Cargo.toml new file mode 100644 index 0000000000000..cf12dce8098d7 --- /dev/null +++ b/primitives/election-providers/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "sp-election-providers" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Primitive election providers" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-arithmetic = { version = "3.0.0", default-features = false, path = "../arithmetic" } +sp-npos-elections = { version = "3.0.0", default-features = false, path = "../npos-elections" } + +[dev-dependencies] +sp-npos-elections = { version = "3.0.0", path = "../npos-elections" } +sp-runtime = { version = "3.0.0", path = "../runtime" } + +[features] +default = ["std"] +runtime-benchmarks = [] +std = [ + "codec/std", + "sp-std/std", + "sp-npos-elections/std", + "sp-arithmetic/std", +] diff --git a/primitives/election-providers/src/lib.rs b/primitives/election-providers/src/lib.rs new file mode 100644 index 0000000000000..73ea58c176b26 --- /dev/null +++ b/primitives/election-providers/src/lib.rs @@ -0,0 +1,241 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Primitive traits for providing election functionality. +//! +//! This crate provides two traits that could interact to enable extensible election functionality +//! within FRAME pallets. +//! +//! Something that will provide the functionality of election will implement [`ElectionProvider`], +//! whilst needing an associated [`ElectionProvider::DataProvider`], which needs to be fulfilled by +//! an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* the receiver +//! of the election, resulting in a diagram as below: +//! +//! ```ignore +//! ElectionDataProvider +//! <------------------------------------------+ +//! | | +//! v | +//! +-----+----+ +------+---+ +//! | | | | +//! pallet-do-election | | | | pallet-needs-election +//! | | | | +//! | | | | +//! +-----+----+ +------+---+ +//! | ^ +//! | | +//! +------------------------------------------+ +//! ElectionProvider +//! ``` +//! +//! > It could also be possible that a third party pallet (C), provides the data of election to an +//! > election provider (B), which then passes the election result to another pallet (A). +//! +//! ## Election Types +//! +//! Typically, two types of elections exist: +//! +//! 1. 
**Stateless**: Election data is provided, and the election result is immediately ready. +//! 2. **Stateful**: Election data is is queried ahead of time, and the election result might be +//! ready some number of blocks in the future. +//! +//! To accommodate both type of elections in one trait, the traits lean toward **stateful +//! election**, as it is more general than the stateless. This is why [`ElectionProvider::elect`] +//! has no parameters. All value and type parameter must be provided by the [`ElectionDataProvider`] +//! trait, even if the election happens immediately. +//! +//! ## Election Data +//! +//! The data associated with an election, essentially what the [`ElectionDataProvider`] must convey +//! is as follows: +//! +//! 1. A list of voters, with their stake. +//! 2. A list of targets (i.e. _candidates_). +//! 3. A number of desired targets to be elected (i.e. _winners_) +//! +//! In addition to that, the [`ElectionDataProvider`] must also hint [`ElectionProvider`] at when +//! the next election might happen ([`ElectionDataProvider::next_election_prediction`]). A stateless +//! election provider would probably ignore this. A stateful election provider can use this to +//! prepare the election result in advance. +//! +//! Nonetheless, an [`ElectionProvider`] shan't rely on this and should preferably provide some +//! means of fallback election as well, in case the `elect` was called immaturely early. +//! +//! ## Example +//! +//! ```rust +//! # use sp_election_providers::*; +//! # use sp_npos_elections::{Support, Assignment}; +//! +//! type AccountId = u64; +//! type Balance = u64; +//! type BlockNumber = u32; +//! +//! mod data_provider { +//! use super::*; +//! +//! pub trait Config: Sized { +//! type ElectionProvider: ElectionProvider< +//! AccountId, +//! BlockNumber, +//! DataProvider = Module, +//! >; +//! } +//! +//! pub struct Module(std::marker::PhantomData); +//! +//! impl ElectionDataProvider for Module { +//! fn desired_targets() -> u32 { +//! 1 +//! } +//! fn voters() -> Vec<(AccountId, VoteWeight, Vec)> { +//! Default::default() +//! } +//! fn targets() -> Vec { +//! vec![10, 20, 30] +//! } +//! fn next_election_prediction(now: BlockNumber) -> BlockNumber { +//! 0 +//! } +//! } +//! } +//! +//! +//! mod generic_election_provider { +//! use super::*; +//! +//! pub struct GenericElectionProvider(std::marker::PhantomData); +//! +//! pub trait Config { +//! type DataProvider: ElectionDataProvider; +//! } +//! +//! impl ElectionProvider for GenericElectionProvider { +//! type Error = (); +//! type DataProvider = T::DataProvider; +//! +//! fn elect() -> Result, Self::Error> { +//! Self::DataProvider::targets() +//! .first() +//! .map(|winner| vec![(*winner, Support::default())]) +//! .ok_or(()) +//! } +//! } +//! } +//! +//! mod runtime { +//! use super::generic_election_provider; +//! use super::data_provider; +//! use super::AccountId; +//! +//! struct Runtime; +//! impl generic_election_provider::Config for Runtime { +//! type DataProvider = data_provider::Module; +//! } +//! +//! impl data_provider::Config for Runtime { +//! type ElectionProvider = generic_election_provider::GenericElectionProvider; +//! } +//! +//! } +//! +//! # fn main() {} +//! ``` + +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod onchain; +use sp_std::{prelude::*, fmt::Debug}; + +/// Re-export some type as they are used in the interface. 
+pub use sp_arithmetic::PerThing; +pub use sp_npos_elections::{Assignment, ExtendedBalance, PerThing128, Supports, VoteWeight}; + +/// Something that can provide the data to an [`ElectionProvider`]. +pub trait ElectionDataProvider { + /// All possible targets for the election, i.e. the candidates. + fn targets() -> Vec; + + /// All possible voters for the election. + /// + /// Note that if a notion of self-vote exists, it should be represented here. + fn voters() -> Vec<(AccountId, VoteWeight, Vec)>; + + /// The number of targets to elect. + fn desired_targets() -> u32; + + /// Provide a best effort prediction about when the next election is about to happen. + /// + /// In essence, the implementor should predict with this function when it will trigger the + /// [`ElectionProvider::elect`]. + /// + /// This is only useful for stateful election providers. + fn next_election_prediction(now: BlockNumber) -> BlockNumber; + + /// Utility function only to be used in benchmarking scenarios, to be implemented optionally, + /// else a noop. + #[cfg(any(feature = "runtime-benchmarks", test))] + fn put_snapshot( + _voters: Vec<(AccountId, VoteWeight, Vec)>, + _targets: Vec, + ) { + } +} + +#[cfg(feature = "std")] +impl ElectionDataProvider for () { + fn targets() -> Vec { + Default::default() + } + fn voters() -> Vec<(AccountId, VoteWeight, Vec)> { + Default::default() + } + fn desired_targets() -> u32 { + Default::default() + } + fn next_election_prediction(now: BlockNumber) -> BlockNumber { + now + } +} + +/// Something that can compute the result of an election and pass it back to the caller. +/// +/// This trait only provides an interface to _request_ an election, i.e. +/// [`ElectionProvider::elect`]. That data required for the election need to be passed to the +/// implemented of this trait through [`ElectionProvider::DataProvider`]. +pub trait ElectionProvider { + /// The error type that is returned by the provider. + type Error: Debug; + + /// The data provider of the election. + type DataProvider: ElectionDataProvider; + + /// Elect a new set of winners. + /// + /// The result is returned in a target major format, namely as vector of supports. + fn elect() -> Result, Self::Error>; +} + +#[cfg(feature = "std")] +impl ElectionProvider for () { + type Error = &'static str; + type DataProvider = (); + + fn elect() -> Result, Self::Error> { + Err("<() as ElectionProvider> cannot do anything.") + } +} diff --git a/primitives/election-providers/src/onchain.rs b/primitives/election-providers/src/onchain.rs new file mode 100644 index 0000000000000..b50dff2ff17d9 --- /dev/null +++ b/primitives/election-providers/src/onchain.rs @@ -0,0 +1,159 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! An implementation of [`ElectionProvider`] that does an on-chain sequential phragmen. 
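To make the split between the two traits above concrete, here is a self-contained sketch with the generics spelled out. The trait shapes are simplified (for example, `Supports` is modelled as a plain `Vec` and `VoteWeight` as `u64`) and `StakingLike`/`FirstTargetWins` are hypothetical names, so this illustrates the data-provider/provider relationship rather than the real `sp-election-providers` interface:

```rust
// A minimal, self-contained model of the ElectionDataProvider / ElectionProvider split.
use std::fmt::Debug;
use std::marker::PhantomData;

type AccountId = u64;
type BlockNumber = u32;
type VoteWeight = u64;
/// Target-major result: each winner together with its total backing (simplified).
type Supports = Vec<(AccountId, VoteWeight)>;

/// The side that owns the election data (e.g. a staking-like pallet).
trait ElectionDataProvider {
    fn targets() -> Vec<AccountId>;
    fn voters() -> Vec<(AccountId, VoteWeight, Vec<AccountId>)>;
    fn desired_targets() -> u32;
    fn next_election_prediction(now: BlockNumber) -> BlockNumber;
}

/// The side that computes the election, fed by its `DataProvider`.
trait ElectionProvider {
    type Error: Debug;
    type DataProvider: ElectionDataProvider;
    fn elect() -> Result<Supports, Self::Error>;
}

struct StakingLike;
impl ElectionDataProvider for StakingLike {
    fn targets() -> Vec<AccountId> { vec![10, 20, 30] }
    fn voters() -> Vec<(AccountId, VoteWeight, Vec<AccountId>)> {
        vec![(1, 100, vec![10, 20]), (2, 50, vec![30])]
    }
    fn desired_targets() -> u32 { 1 }
    fn next_election_prediction(now: BlockNumber) -> BlockNumber { now + 10 }
}

/// A toy "stateless" provider: elects the first target with its summed backing.
struct FirstTargetWins<D>(PhantomData<D>);
impl<D: ElectionDataProvider> ElectionProvider for FirstTargetWins<D> {
    type Error = ();
    type DataProvider = D;
    fn elect() -> Result<Supports, Self::Error> {
        let winner = *Self::DataProvider::targets().first().ok_or(())?;
        let total = Self::DataProvider::voters()
            .iter()
            .filter(|(_, _, votes)| votes.contains(&winner))
            .map(|(_, stake, _)| *stake)
            .sum();
        Ok(vec![(winner, total)])
    }
}

fn main() {
    let result = <FirstTargetWins<StakingLike>>::elect().unwrap();
    assert_eq!(result, vec![(10, 100)]);
}
```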
+ +use crate::{ElectionDataProvider, ElectionProvider}; +use sp_npos_elections::*; +use sp_std::{collections::btree_map::BTreeMap, marker::PhantomData, prelude::*}; + +/// Errors of the on-chain election. +#[derive(Eq, PartialEq, Debug)] +pub enum Error { + /// An internal error in the NPoS elections crate. + NposElections(sp_npos_elections::Error), +} + +impl From for Error { + fn from(e: sp_npos_elections::Error) -> Self { + Error::NposElections(e) + } +} + +/// A simple on-chain implementation of the election provider trait. +/// +/// This will accept voting data on the fly and produce the results immediately. +/// +/// ### Warning +/// +/// This can be very expensive to run frequently on-chain. Use with care. +pub struct OnChainSequentialPhragmen(PhantomData); + +/// Configuration trait of [`OnChainSequentialPhragmen`]. +/// +/// Note that this is similar to a pallet traits, but [`OnChainSequentialPhragmen`] is not a pallet. +pub trait Config { + /// The account identifier type. + type AccountId: IdentifierT; + /// The block number type. + type BlockNumber; + /// The accuracy used to compute the election: + type Accuracy: PerThing128; + /// Something that provides the data for election. + type DataProvider: ElectionDataProvider; +} + +impl ElectionProvider for OnChainSequentialPhragmen { + type Error = Error; + type DataProvider = T::DataProvider; + + fn elect() -> Result, Self::Error> { + let voters = Self::DataProvider::voters(); + let targets = Self::DataProvider::targets(); + let desired_targets = Self::DataProvider::desired_targets() as usize; + + let mut stake_map: BTreeMap = BTreeMap::new(); + + voters.iter().for_each(|(v, s, _)| { + stake_map.insert(v.clone(), *s); + }); + + let stake_of = |w: &T::AccountId| -> VoteWeight { + stake_map.get(w).cloned().unwrap_or_default() + }; + + let ElectionResult { winners, assignments } = + seq_phragmen::<_, T::Accuracy>(desired_targets, targets, voters, None) + .map_err(Error::from)?; + + let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?; + let winners = to_without_backing(winners); + + to_supports(&winners, &staked).map_err(Error::from) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_npos_elections::Support; + use sp_runtime::Perbill; + + type AccountId = u64; + type BlockNumber = u32; + + struct Runtime; + impl Config for Runtime { + type AccountId = AccountId; + type BlockNumber = BlockNumber; + type Accuracy = Perbill; + type DataProvider = mock_data_provider::DataProvider; + } + + type OnChainPhragmen = OnChainSequentialPhragmen; + + mod mock_data_provider { + use super::*; + + pub struct DataProvider; + + impl ElectionDataProvider for DataProvider { + fn voters() -> Vec<(AccountId, VoteWeight, Vec)> { + vec![ + (1, 10, vec![10, 20]), + (2, 20, vec![30, 20]), + (3, 30, vec![10, 30]), + ] + } + + fn targets() -> Vec { + vec![10, 20, 30] + } + + fn desired_targets() -> u32 { + 2 + } + + fn next_election_prediction(_: BlockNumber) -> BlockNumber { + 0 + } + } + } + + #[test] + fn onchain_seq_phragmen_works() { + assert_eq!( + OnChainPhragmen::elect().unwrap(), + vec![ + ( + 10, + Support { + total: 25, + voters: vec![(1, 10), (3, 15)] + } + ), + ( + 30, + Support { + total: 35, + voters: vec![(2, 20), (3, 15)] + } + ) + ] + ); + } +} diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index 9000dde058cd4..05de1837dc1d5 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-externalities" 
-version = "0.8.0" +version = "0.9.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -14,10 +14,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-storage = { version = "2.0.0", path = "../storage", default-features = false } -sp-std = { version = "2.0.0", path = "../std", default-features = false } +sp-storage = { version = "3.0.0", path = "../storage", default-features = false } +sp-std = { version = "3.0.0", path = "../std", default-features = false } environmental = { version = "1.1.2", default-features = false } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } [features] default = ["std"] diff --git a/primitives/externalities/src/extensions.rs b/primitives/externalities/src/extensions.rs index a7f5ee8bc739e..69c6c09be4487 100644 --- a/primitives/externalities/src/extensions.rs +++ b/primitives/externalities/src/extensions.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -98,7 +98,7 @@ pub trait ExtensionStore { /// instead of this function to get type system support and automatic type downcasting. fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any>; - /// Register extension `extension` with speciifed `type_id`. + /// Register extension `extension` with specified `type_id`. /// /// It should return error if extension is already registered. fn register_extension_with_type_id(&mut self, type_id: TypeId, extension: Box) -> Result<(), Error>; diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 388482964f18c..a10ce32bdc855 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -137,7 +137,17 @@ pub trait Externalities: ExtensionStore { ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, child_info: &ChildInfo); + /// + /// Deletes all keys from the overlay and up to `limit` keys from the backend. No + /// limit is applied if `limit` is `None`. Returns `true` if the child trie was + /// removed completely and `false` if there are remaining keys after the function + /// returns. + /// + /// # Note + /// + /// An implementation is free to delete more keys than the specified limit as long as + /// it is able to do that in constant time. + fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option) -> bool; /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -160,9 +170,6 @@ pub trait Externalities: ExtensionStore { value: Option>, ); - /// Get the identity of the chain. - fn chain_id(&self) -> u64; - /// Get the trie root of the current storage map. /// /// This will also update all child storage keys in the top-level storage map. 
diff --git a/primitives/externalities/src/scope_limited.rs b/primitives/externalities/src/scope_limited.rs index 1f70276f02d36..3b5013ba8e7fe 100644 --- a/primitives/externalities/src/scope_limited.rs +++ b/primitives/externalities/src/scope_limited.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index 88098139ceec0..c8ff2fc0a2e65 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-finality-grandpa" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,16 +15,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -grandpa = { package = "finality-grandpa", version = "0.12.3", default-features = false, features = ["derive-codec"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +grandpa = { package = "finality-grandpa", version = "0.13.0", default-features = false, features = ["derive-codec"] } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-keystore = { version = "0.8.0", default-features = false, path = "../keystore", optional = true } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-api = { version = "3.0.0", default-features = false, path = "../api" } +sp-application-crypto = { version = "3.0.0", default-features = false, path = "../application-crypto" } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } +sp-keystore = { version = "0.9.0", default-features = false, path = "../keystore", optional = true } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } [features] default = ["std"] diff --git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index 2c569fafda4ce..383e4fe37134c 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -102,7 +102,7 @@ pub enum ConsensusLog { /// This should be a pure function: i.e. as long as the runtime can interpret /// the digest type it should return the same result regardless of the current /// state. - #[codec(index = "1")] + #[codec(index = 1)] ScheduledChange(ScheduledChange), /// Force an authority set change. /// @@ -118,18 +118,18 @@ pub enum ConsensusLog { /// This should be a pure function: i.e. 
as long as the runtime can interpret /// the digest type it should return the same result regardless of the current /// state. - #[codec(index = "2")] + #[codec(index = 2)] ForcedChange(N, ScheduledChange), /// Note that the authority with given index is disabled until the next change. - #[codec(index = "3")] + #[codec(index = 3)] OnDisabled(AuthorityIndex), /// A signal to pause the current authority set after the given delay. /// After finalizing the block at _delay_ the authorities should stop voting. - #[codec(index = "4")] + #[codec(index = 4)] Pause(N), /// A signal to resume the current authority set after the given delay. /// After authoring the block at _delay_ the authorities should resume voting. - #[codec(index = "5")] + #[codec(index = 5)] Resume(N), } @@ -252,6 +252,14 @@ impl Equivocation { Equivocation::Precommit(ref equivocation) => &equivocation.identity, } } + + /// Returns the round number when the equivocation happened. + pub fn round_number(&self) -> RoundNumber { + match self { + Equivocation::Prevote(ref equivocation) => equivocation.round_number, + Equivocation::Precommit(ref equivocation) => equivocation.round_number, + } + } } /// Verifies the equivocation proof by making sure that both votes target diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index fdece1f9d3d34..c0e74c0fb99fd 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-inherents" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,10 +15,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parking_lot = { version = "0.10.0", optional = true } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +parking_lot = { version = "0.11.1", optional = true } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } thiserror = { version = "1.0.21", optional = true } [features] diff --git a/primitives/inherents/src/lib.rs b/primitives/inherents/src/lib.rs index e91fb06e3f342..36a1b32775c30 100644 --- a/primitives/inherents/src/lib.rs +++ b/primitives/inherents/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -398,9 +398,11 @@ impl IsFatalError for MakeFatalError { } } -/// A module that provides an inherent and may also verifies it. +/// A pallet that provides or verifies an inherent extrinsic. +/// +/// The pallet may provide the inherent, verify an inherent, or both provide and verify. pub trait ProvideInherent { - /// The call type of the module. + /// The call type of the pallet. type Call; /// The error returned by `check_inherent`. type Error: codec::Encode + IsFatalError; @@ -410,13 +412,27 @@ pub trait ProvideInherent { /// Create an inherent out of the given `InherentData`. fn create_inherent(data: &InherentData) -> Option; - /// If `Some`, indicates that an inherent is required. 
Check will return the inner error if no - /// inherent is found. If `Err`, indicates that the check failed and further operations should - /// be aborted. + /// Determines whether this inherent is required in this block. + /// + /// - `Ok(None)` indicates that this inherent is not required in this block. The default + /// implementation returns this. + /// + /// - `Ok(Some(e))` indicates that this inherent is required in this block. The + /// `impl_outer_inherent!`, will call this function from its `check_extrinsics`. + /// If the inherent is not present, it will return `e`. + /// + /// - `Err(_)` indicates that this function failed and further operations should be aborted. + /// + /// CAUTION: This check has a bug when used in pallets that also provide unsigned transactions. + /// See https://github.com/paritytech/substrate/issues/6243 for details. fn is_inherent_required(_: &InherentData) -> Result, Self::Error> { Ok(None) } - /// Check the given inherent if it is valid. - /// Checking the inherent is optional and can be omitted. + /// Check whether the given inherent is valid. Checking the inherent is optional and can be + /// omitted by using the default implementation. + /// + /// When checking an inherent, the first parameter represents the inherent that is actually + /// included in the block by its author. Whereas the second parameter represents the inherent + /// data that the verifying node calculates. fn check_inherent(_: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { Ok(()) } diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index e8483b2ef68c5..f87711b17234f 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-io" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,22 +15,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } hash-db = { version = "0.15.2", default-features = false } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-keystore = { version = "0.8.0", default-features = false, optional = true, path = "../keystore" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } +sp-keystore = { version = "0.9.0", default-features = false, optional = true, path = "../keystore" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } libsecp256k1 = { version = "0.3.4", optional = true } -sp-state-machine = { version = "0.8.0", optional = true, path = "../../primitives/state-machine" } -sp-wasm-interface = { version = "2.0.0", path = "../../primitives/wasm-interface", default-features = false } -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../runtime-interface" } -sp-trie = { version = "2.0.0", optional = true, path = "../../primitives/trie" } -sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } -sp-tracing = { version = "2.0.0", default-features = false, path = "../tracing" } +sp-state-machine = { version = "0.9.0", optional = true, path = "../state-machine" } +sp-wasm-interface = { version = "3.0.0", path = "../wasm-interface", default-features = false } +sp-runtime-interface = { version = "3.0.0", default-features = false, path = 
"../runtime-interface" } +sp-trie = { version = "3.0.0", optional = true, path = "../trie" } +sp-externalities = { version = "0.9.0", optional = true, path = "../externalities" } +sp-tracing = { version = "3.0.0", default-features = false, path = "../tracing" } log = { version = "0.4.8", optional = true } futures = { version = "0.3.1", features = ["thread-pool"], optional = true } -parking_lot = { version = "0.10.0", optional = true } -tracing = { version = "0.1.19", default-features = false } +parking_lot = { version = "0.11.1", optional = true } +tracing = { version = "0.1.22", default-features = false } tracing-core = { version = "0.1.17", default-features = false} [features] diff --git a/primitives/io/src/batch_verifier.rs b/primitives/io/src/batch_verifier.rs index 39229b1200b91..341df36c55649 100644 --- a/primitives/io/src/batch_verifier.rs +++ b/primitives/io/src/batch_verifier.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 382a0c4b3bd6a..397dd3c21712a 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -279,7 +279,35 @@ pub trait DefaultChildStorage { storage_key: &[u8], ) { let child_info = ChildInfo::new_default(storage_key); - self.kill_child_storage(&child_info); + self.kill_child_storage(&child_info, None); + } + + /// Clear a child storage key. + /// + /// Deletes all keys from the overlay and up to `limit` keys from the backend if + /// it is set to `Some`. No limit is applied when `limit` is set to `None`. + /// + /// The limit can be used to partially delete a child trie in case it is too large + /// to delete in one go (block). + /// + /// It returns false iff some keys are remaining in + /// the child trie after the functions returns. + /// + /// # Note + /// + /// Please note that keys that are residing in the overlay for that child trie when + /// issuing this call are all deleted without counting towards the `limit`. Only keys + /// written during the current block are part of the overlay. Deleting with a `limit` + /// mostly makes sense with an empty overlay for that child trie. + /// + /// Calling this function multiple times per block for the same `storage_key` does + /// not make much sense because it is not cumulative when called inside the same block. + /// Use this function to distribute the deletion of a single child trie across multiple + /// blocks. + #[version(2)] + fn storage_kill(&mut self, storage_key: &[u8], limit: Option) -> bool { + let child_info = ChildInfo::new_default(storage_key); + self.kill_child_storage(&child_info, limit) } /// Check a child storage key. @@ -360,11 +388,6 @@ pub trait Trie { /// Interface that provides miscellaneous functions for communicating between the runtime and the node. #[runtime_interface] pub trait Misc { - /// The current relay chain identifier. - fn chain_id(&self) -> u64 { - sp_externalities::Externalities::chain_id(*self) - } - /// Print a number. 
fn print_num(val: u64) { log::debug!(target: "runtime", "{}", val); @@ -707,6 +730,11 @@ pub trait Hashing { sp_core::hashing::keccak_256(data) } + /// Conduct a 512-bit Keccak hash. + fn keccak_512(data: &[u8]) -> [u8; 64] { + sp_core::hashing::keccak_512(data) + } + /// Conduct a 256-bit Sha2 hash. fn sha2_256(data: &[u8]) -> [u8; 32] { sp_core::hashing::sha2_256(data) @@ -1093,7 +1121,7 @@ mod tracing_setup { }; use super::{wasm_tracing, Crossing}; - const TRACING_SET : AtomicBool = AtomicBool::new(false); + static TRACING_SET: AtomicBool = AtomicBool::new(false); /// The PassingTracingSubscriber implements `tracing_core::Subscriber` diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index be4db5834458e..ee71687f1ef7c 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-keyring" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0", path = "../core" } -sp-runtime = { version = "2.0.0", path = "../runtime" } +sp-core = { version = "3.0.0", path = "../core" } +sp-runtime = { version = "3.0.0", path = "../runtime" } lazy_static = "1.4.0" -strum = { version = "0.16.0", features = ["derive"] } +strum = { version = "0.20.0", features = ["derive"] } diff --git a/primitives/keyring/src/ed25519.rs b/primitives/keyring/src/ed25519.rs index 17882027387c5..c9dd70d63d5c9 100644 --- a/primitives/keyring/src/ed25519.rs +++ b/primitives/keyring/src/ed25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/keyring/src/lib.rs b/primitives/keyring/src/lib.rs index 55ed14d294f1d..d7fb7c4fd2f2b 100644 --- a/primitives/keyring/src/lib.rs +++ b/primitives/keyring/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/keyring/src/sr25519.rs b/primitives/keyring/src/sr25519.rs index 80397f0de9fc1..a4f43be07f07d 100644 --- a/primitives/keyring/src/sr25519.rs +++ b/primitives/keyring/src/sr25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
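The new `keccak_512` host function above simply forwards to `sp_core::hashing::keccak_512` and yields a 64-byte digest, alongside the existing 32-byte `keccak_256`. A small usage sketch on the native side, assuming a dependency on the `sp-core` crate from this repository:

```rust
// Minimal sketch: the new host function is backed by `sp_core::hashing::keccak_512`.
// Assumed dependency: sp-core (path = "primitives/core" in this repository).
use sp_core::hashing::{keccak_256, keccak_512};

fn main() {
    let data = b"hello substrate";

    let short: [u8; 32] = keccak_256(data);
    let long: [u8; 64] = keccak_512(data);

    // Keccak-256 is not a truncation of Keccak-512; the two digests differ.
    assert_ne!(&short[..], &long[..32]);
}
```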
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index d53d1ebd533c8..81404ce344a21 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-keystore" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,15 +15,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.30" derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } futures = { version = "0.3.1" } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } merlin = { version = "2.0", default-features = false } -parking_lot = { version = "0.10.0", default-features = false } - -sp-core = { version = "2.0.0", path = "../core" } -sp-externalities = { version = "0.8.0", path = "../externalities", default-features = false } +parking_lot = { version = "0.11.1", default-features = false } +serde = { version = "1.0", optional = true} +sp-core = { version = "3.0.0", path = "../core" } +sp-externalities = { version = "0.9.0", path = "../externalities", default-features = false } [dev-dependencies] rand = "0.7.2" rand_chacha = "0.2.2" + + +[features] +default = ["std"] +std = [ + "serde", + "schnorrkel/std", + "schnorrkel/serde", +] diff --git a/primitives/keystore/src/lib.rs b/primitives/keystore/src/lib.rs index 068c174aecdfa..f42f6dd7122d0 100644 --- a/primitives/keystore/src/lib.rs +++ b/primitives/keystore/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -96,9 +96,9 @@ pub trait CryptoStore: Send + Sync { /// `Err` if there's some sort of weird filesystem error, but should generally be `Ok`. async fn insert_unknown( &self, - _key_type: KeyTypeId, - _suri: &str, - _public: &[u8] + id: KeyTypeId, + suri: &str, + public: &[u8] ) -> Result<(), ()>; /// Find intersection between provided keys and supported keys diff --git a/primitives/keystore/src/testing.rs b/primitives/keystore/src/testing.rs index a5e460951493b..702e2bbc857d7 100644 --- a/primitives/keystore/src/testing.rs +++ b/primitives/keystore/src/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/keystore/src/vrf.rs b/primitives/keystore/src/vrf.rs index 750ca0eac6be7..463a565f9d86c 100644 --- a/primitives/keystore/src/vrf.rs +++ b/primitives/keystore/src/vrf.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,9 +20,11 @@ use codec::Encode; use merlin::Transcript; use schnorrkel::vrf::{VRFOutput, VRFProof}; + /// An enum whose variants represent possible /// accepted values to construct the VRF transcript #[derive(Clone, Encode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum VRFTranscriptValue { /// Value is an array of bytes Bytes(Vec), @@ -38,6 +40,7 @@ pub struct VRFTranscriptData { pub items: Vec<(&'static str, VRFTranscriptValue)>, } /// VRF signature data +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct VRFSignature { /// The VRFOutput serialized pub output: VRFOutput, @@ -67,7 +70,6 @@ pub fn make_transcript(data: VRFTranscriptData) -> Transcript { #[cfg(test)] mod tests { use super::*; - use crate::vrf::VRFTranscriptValue; use rand::RngCore; use rand_chacha::{ rand_core::SeedableRng, diff --git a/primitives/npos-elections/Cargo.toml b/primitives/npos-elections/Cargo.toml index 4a66743028d19..79d46743cd758 100644 --- a/primitives/npos-elections/Cargo.toml +++ b/primitives/npos-elections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-npos-elections" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,16 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-npos-elections-compact = { version = "2.0.0", path = "./compact" } -sp-arithmetic = { version = "2.0.0", default-features = false, path = "../arithmetic" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-npos-elections-compact = { version = "3.0.0", path = "./compact" } +sp-arithmetic = { version = "3.0.0", default-features = false, path = "../arithmetic" } +sp-core = { version = "3.0.0", default-features = false, path = "../core" } [dev-dependencies] -substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } +substrate-test-utils = { version = "3.0.0", path = "../../test-utils" } rand = "0.7.3" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "3.0.0", path = "../runtime" } [features] default = ["std"] @@ -32,4 +33,5 @@ std = [ "serde", "sp-std/std", "sp-arithmetic/std", + "sp-core/std", ] diff --git a/primitives/npos-elections/README.md b/primitives/npos-elections/README.md index a98351a6d89a7..b518e63615fa6 100644 --- a/primitives/npos-elections/README.md +++ b/primitives/npos-elections/README.md @@ -1,11 +1,58 @@ A set of election algorithms to be used with a substrate runtime, typically within the staking -sub-system. Notable implementation include +sub-system. Notable implementation include: - [`seq_phragmen`]: Implements the Phragmén Sequential Method. An un-ranked, relatively fast election method that ensures PJR, but does not provide a constant factor approximation of the maximin problem. -- [`balance_solution`]: Implements the star balancing algorithm. This iterative process can - increase a solutions score, as described in [`evaluate_support`]. 
+- [`phragmms`]: Implements a hybrid approach inspired by Phragmén which is executed faster but + it can achieve a constant factor approximation of the maximin problem, similar to that of the + MMS algorithm. +- [`balance_solution`]: Implements the star balancing algorithm. This iterative process can push + a solution toward being more `balances`, which in turn can increase its score. + +### Terminology + +This crate uses context-independent words, not to be confused with staking. This is because the +election algorithms of this crate, while designed for staking, can be used in other contexts as +well. + +`Voter`: The entity casting some votes to a number of `Targets`. This is the same as `Nominator` +in the context of staking. `Target`: The entities eligible to be voted upon. This is the same as +`Validator` in the context of staking. `Edge`: A mapping from a `Voter` to a `Target`. + +The goal of an election algorithm is to provide an `ElectionResult`. A data composed of: +- `winners`: A flat list of identifiers belonging to those who have won the election, usually + ordered in some meaningful way. They are zipped with their total backing stake. +- `assignment`: A mapping from each voter to their winner-only targets, zipped with a ration + denoting the amount of support given to that particular target. + +```rust +// the winners. +let winners = vec![(1, 100), (2, 50)]; +let assignments = vec![ + // A voter, giving equal backing to both 1 and 2. + Assignment { + who: 10, + distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))], + }, + // A voter, Only backing 1. + Assignment { who: 20, distribution: vec![(1, Perbill::from_percent(100))] }, +]; + +// the combination of the two makes the election result. +let election_result = ElectionResult { winners, assignments }; + +``` + +The `Assignment` field of the election result is voter-major, i.e. it is from the perspective of +the voter. The struct that represents the opposite is called a `Support`. This struct is usually +accessed in a map-like manner, i.e. keyed vy voters, therefor it is stored as a mapping called +`SupportMap`. + +Moreover, the support is built from absolute backing values, not ratios like the example above. +A struct similar to `Assignment` that has stake value instead of ratios is called an +`StakedAssignment`. 
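The voter-major (`Assignment`/`StakedAssignment`) versus target-major (`Support`) distinction described above can be made concrete with a few lines of code. The sketch below uses local stand-ins for the crate's types and mirrors the ratio-based example: voter 10 has 100 stake split equally between targets 1 and 2, and voter 20 backs target 1 with its 50 stake:

```rust
// Local stand-ins for `StakedAssignment` and `Support`, for illustration only.
use std::collections::BTreeMap;

type AccountId = u64;
type Balance = u128;

/// Voter-major view: one voter, absolute stake assigned to each backed target.
struct StakedAssignment {
    who: AccountId,
    distribution: Vec<(AccountId, Balance)>,
}

/// Target-major view: one winner, its total backing and the contributing voters.
#[derive(Default, Debug, PartialEq)]
struct Support {
    total: Balance,
    voters: Vec<(AccountId, Balance)>,
}

/// Flip the voter-major assignments into a target-major support map.
fn to_support_map(staked: &[StakedAssignment]) -> BTreeMap<AccountId, Support> {
    let mut map: BTreeMap<AccountId, Support> = BTreeMap::new();
    for StakedAssignment { who, distribution } in staked {
        for (target, amount) in distribution {
            let entry = map.entry(*target).or_default();
            entry.total += *amount;
            entry.voters.push((*who, *amount));
        }
    }
    map
}

fn main() {
    // Voter 10: 100 stake split equally between 1 and 2; voter 20: 50 stake all on 1.
    let staked = vec![
        StakedAssignment { who: 10, distribution: vec![(1, 50), (2, 50)] },
        StakedAssignment { who: 20, distribution: vec![(1, 50)] },
    ];

    let supports = to_support_map(&staked);
    // Matches the winners `(1, 100)` and `(2, 50)` from the example above.
    assert_eq!(supports[&1], Support { total: 100, voters: vec![(10, 50), (20, 50)] });
    assert_eq!(supports[&2], Support { total: 50, voters: vec![(10, 50)] });
}
```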
+ More information can be found at: https://arxiv.org/abs/2004.12990 diff --git a/primitives/npos-elections/benches/phragmen.rs b/primitives/npos-elections/benches/phragmen.rs index ce4e0196ab4f7..d48c246558844 100644 --- a/primitives/npos-elections/benches/phragmen.rs +++ b/primitives/npos-elections/benches/phragmen.rs @@ -30,7 +30,7 @@ use sp_npos_elections::{ElectionResult, VoteWeight}; use std::collections::BTreeMap; use sp_runtime::{Perbill, PerThing, traits::Zero}; use sp_npos_elections::{ - balance_solution, assignment_ratio_to_staked, build_support_map, to_without_backing, VoteWeight, + balance_solution, assignment_ratio_to_staked, to_support_map, to_without_backing, VoteWeight, ExtendedBalance, Assignment, StakedAssignment, IdentifierT, assignment_ratio_to_staked, seq_phragmen, }; @@ -65,7 +65,6 @@ mod bench_closure_and_slice { ) -> Vec> where T: sp_std::ops::Mul, - ExtendedBalance: From<::Inner>, { ratio .into_iter() @@ -149,7 +148,7 @@ fn do_phragmen( if eq_iters > 0 { let staked = assignment_ratio_to_staked(assignments, &stake_of); let winners = to_without_backing(winners); - let mut support = build_support_map( + let mut support = to_support_map( winners.as_ref(), staked.as_ref(), ).unwrap(); diff --git a/primitives/npos-elections/compact/Cargo.toml b/primitives/npos-elections/compact/Cargo.toml index 1873f8fa16057..57cb6dc1c4f22 100644 --- a/primitives/npos-elections/compact/Cargo.toml +++ b/primitives/npos-elections/compact/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-npos-elections-compact" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.7", features = ["full", "visit"] } +syn = { version = "1.0.58", features = ["full", "visit"] } quote = "1.0" proc-macro2 = "1.0.6" proc-macro-crate = "0.1.4" diff --git a/primitives/npos-elections/compact/src/assignment.rs b/primitives/npos-elections/compact/src/assignment.rs index 8b61076521d7c..12f5ca2b41735 100644 --- a/primitives/npos-elections/compact/src/assignment.rs +++ b/primitives/npos-elections/compact/src/assignment.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ use crate::field_name_for; use proc_macro2::TokenStream as TokenStream2; use quote::quote; -fn from_impl(count: usize) -> TokenStream2 { +pub(crate) fn from_impl(count: usize) -> TokenStream2 { let from_impl_single = { let name = field_name_for(1); quote!(1 => compact.#name.push( @@ -73,7 +73,7 @@ fn from_impl(count: usize) -> TokenStream2 { ) } -fn into_impl(count: usize, per_thing: syn::Type) -> TokenStream2 { +pub(crate) fn into_impl(count: usize, per_thing: syn::Type) -> TokenStream2 { let into_impl_single = { let name = field_name_for(1); quote!( @@ -153,53 +153,3 @@ fn into_impl(count: usize, per_thing: syn::Type) -> TokenStream2 { #into_impl_rest ) } - -pub(crate) fn assignment( - ident: syn::Ident, - voter_type: syn::Type, - target_type: syn::Type, - weight_type: syn::Type, - count: usize, -) -> TokenStream2 { - let from_impl = from_impl(count); - let into_impl = into_impl(count, weight_type.clone()); - - quote!( - use _npos::__OrInvalidIndex; - impl #ident { - pub fn from_assignment( - assignments: Vec<_npos::Assignment>, - index_of_voter: FV, - index_of_target: FT, - ) -> Result - where - A: _npos::IdentifierT, - for<'r> FV: Fn(&'r A) -> Option<#voter_type>, - for<'r> FT: Fn(&'r A) -> Option<#target_type>, - { - let mut compact: #ident = Default::default(); - - for _npos::Assignment { who, distribution } in assignments { - match distribution.len() { - 0 => continue, - #from_impl - _ => { - return Err(_npos::Error::CompactTargetOverflow); - } - } - }; - Ok(compact) - } - - pub fn into_assignment( - self, - voter_at: impl Fn(#voter_type) -> Option, - target_at: impl Fn(#target_type) -> Option, - ) -> Result>, _npos::Error> { - let mut assignments: Vec<_npos::Assignment> = Default::default(); - #into_impl - Ok(assignments) - } - } - ) -} diff --git a/primitives/npos-elections/compact/src/codec.rs b/primitives/npos-elections/compact/src/codec.rs index 6c5a3bc2134d3..6e8d4d9277dbd 100644 --- a/primitives/npos-elections/compact/src/codec.rs +++ b/primitives/npos-elections/compact/src/codec.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/npos-elections/compact/src/lib.rs b/primitives/npos-elections/compact/src/lib.rs index b35c407c40cd5..191998a341924 100644 --- a/primitives/npos-elections/compact/src/lib.rs +++ b/primitives/npos-elections/compact/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -58,8 +58,8 @@ pub(crate) fn syn_err(message: &'static str) -> syn::Error { /// /// The given struct provides function to convert from/to Assignment: /// -/// - [`from_assignment()`]. -/// - [`fn into_assignment()`]. +/// - `fn from_assignment<..>(..)` +/// - `fn into_assignment<..>(..)` /// /// The generated struct is by default deriving both `Encode` and `Decode`. This is okay but could /// lead to many 0s in the solution. 
If prefixed with `#[compact]`, then a custom compact encoding @@ -95,19 +95,11 @@ pub fn generate_solution_type(item: TokenStream) -> TokenStream { compact_encoding, ).unwrap_or_else(|e| e.to_compile_error()); - let assignment_impls = assignment::assignment( - ident.clone(), - voter_type.clone(), - target_type.clone(), - weight_type.clone(), - count, - ); - quote!( #imports #solution_struct - #assignment_impls - ).into() + ) + .into() } fn struct_def( @@ -125,29 +117,32 @@ fn struct_def( let singles = { let name = field_name_for(1); + // NOTE: we use the visibility of the struct for the fields as well.. could be made better. quote!( - #name: Vec<(#voter_type, #target_type)>, + #vis #name: Vec<(#voter_type, #target_type)>, ) }; let doubles = { let name = field_name_for(2); quote!( - #name: Vec<(#voter_type, (#target_type, #weight_type), #target_type)>, + #vis #name: Vec<(#voter_type, (#target_type, #weight_type), #target_type)>, ) }; - let rest = (3..=count).map(|c| { - let field_name = field_name_for(c); - let array_len = c - 1; - quote!( - #field_name: Vec<( - #voter_type, - [(#target_type, #weight_type); #array_len], - #target_type - )>, - ) - }).collect::(); + let rest = (3..=count) + .map(|c| { + let field_name = field_name_for(c); + let array_len = c - 1; + quote!( + #vis #field_name: Vec<( + #voter_type, + [(#target_type, #weight_type); #array_len], + #target_type + )>, + ) + }) + .collect::(); let len_impl = len_impl(count); let edge_count_impl = edge_count_impl(count); @@ -172,40 +167,38 @@ fn struct_def( quote!(#[derive(Default, PartialEq, Eq, Clone, Debug, _npos::codec::Encode, _npos::codec::Decode)]) }; + let from_impl = assignment::from_impl(count); + let into_impl = assignment::into_impl(count, weight_type.clone()); + Ok(quote! ( /// A struct to encode a election assignment in a compact way. #derives_and_maybe_compact_encoding #vis struct #ident { #singles #doubles #rest } - impl _npos::VotingLimit for #ident { + use _npos::__OrInvalidIndex; + impl _npos::CompactSolution for #ident { const LIMIT: usize = #count; - } + type Voter = #voter_type; + type Target = #target_type; + type Accuracy = #weight_type; - impl #ident { - /// Get the length of all the assignments that this type is encoding. This is basically - /// the same as the number of assignments, or the number of voters in total. - pub fn len(&self) -> usize { + fn voter_count(&self) -> usize { let mut all_len = 0usize; #len_impl all_len } - /// Get the total count of edges. - pub fn edge_count(&self) -> usize { + fn edge_count(&self) -> usize { let mut all_edges = 0usize; #edge_count_impl all_edges } - /// Get the number of unique targets in the whole struct. - /// - /// Once presented with a list of winners, this set and the set of winners must be - /// equal. - /// - /// The resulting indices are sorted. - pub fn unique_targets(&self) -> Vec<#target_type> { - let mut all_targets: Vec<#target_type> = Vec::with_capacity(self.average_edge_count()); - let mut maybe_insert_target = |t: #target_type| { + fn unique_targets(&self) -> Vec { + // NOTE: this implementation returns the targets sorted, but we don't use it yet per + // se, nor is the API enforcing it. + let mut all_targets: Vec = Vec::with_capacity(self.average_edge_count()); + let mut maybe_insert_target = |t: Self::Target| { match all_targets.binary_search(&t) { Ok(_) => (), Err(pos) => all_targets.insert(pos, t) @@ -217,22 +210,44 @@ fn struct_def( all_targets } - /// Get the average edge count. 
- pub fn average_edge_count(&self) -> usize { - self.edge_count().checked_div(self.len()).unwrap_or(0) - } - - /// Remove a certain voter. - /// - /// This will only search until the first instance of `to_remove`, and return true. If - /// no instance is found (no-op), then it returns false. - /// - /// In other words, if this return true, exactly one element must have been removed from - /// `self.len()`. - pub fn remove_voter(&mut self, to_remove: #voter_type) -> bool { + fn remove_voter(&mut self, to_remove: Self::Voter) -> bool { #remove_voter_impl return false } + + fn from_assignment( + assignments: Vec<_npos::Assignment>, + index_of_voter: FV, + index_of_target: FT, + ) -> Result + where + A: _npos::IdentifierT, + for<'r> FV: Fn(&'r A) -> Option, + for<'r> FT: Fn(&'r A) -> Option, + { + let mut compact: #ident = Default::default(); + + for _npos::Assignment { who, distribution } in assignments { + match distribution.len() { + 0 => continue, + #from_impl + _ => { + return Err(_npos::Error::CompactTargetOverflow); + } + } + }; + Ok(compact) + } + + fn into_assignment( + self, + voter_at: impl Fn(Self::Voter) -> Option, + target_at: impl Fn(Self::Target) -> Option, + ) -> Result>, _npos::Error> { + let mut assignments: Vec<_npos::Assignment> = Default::default(); + #into_impl + Ok(assignments) + } } )) } diff --git a/primitives/npos-elections/fuzzer/Cargo.toml b/primitives/npos-elections/fuzzer/Cargo.toml index 49740b2cf3cae..bac8a165f3947 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -14,12 +14,12 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-npos-elections = { version = "2.0.0", path = ".." } -sp-std = { version = "2.0.0", path = "../../std" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } +sp-npos-elections = { version = "3.0.0", path = ".." } +sp-std = { version = "3.0.0", path = "../../std" } +sp-runtime = { version = "3.0.0", path = "../../runtime" } honggfuzz = "0.5" rand = { version = "0.7.3", features = ["std", "small_rng"] } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } [[bin]] name = "reduce" diff --git a/primitives/npos-elections/fuzzer/src/common.rs b/primitives/npos-elections/fuzzer/src/common.rs index a5099098f5a86..29f0247f84f31 100644 --- a/primitives/npos-elections/fuzzer/src/common.rs +++ b/primitives/npos-elections/fuzzer/src/common.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs b/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs index 67cc7ba3c9a9a..4ff18e95d1ef1 100644 --- a/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs +++ b/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,8 +22,8 @@ mod common; use common::*; use honggfuzz::fuzz; use sp_npos_elections::{ - assignment_ratio_to_staked_normalized, build_support_map, to_without_backing, VoteWeight, - evaluate_support, is_score_better, seq_phragmen, + assignment_ratio_to_staked_normalized, is_score_better, seq_phragmen, to_supports, + to_without_backing, EvaluateSupport, VoteWeight, }; use sp_runtime::Perbill; use rand::{self, SeedableRng}; @@ -66,11 +66,14 @@ fn main() { }; let unbalanced_score = { - let staked = assignment_ratio_to_staked_normalized(unbalanced.assignments.clone(), &stake_of).unwrap(); + let staked = assignment_ratio_to_staked_normalized( + unbalanced.assignments.clone(), + &stake_of, + ) + .unwrap(); let winners = to_without_backing(unbalanced.winners.clone()); - let support = build_support_map(winners.as_ref(), staked.as_ref()).unwrap(); + let score = to_supports(winners.as_ref(), staked.as_ref()).unwrap().evaluate(); - let score = evaluate_support(&support); if score[0] == 0 { // such cases cannot be improved by balancing. return; @@ -87,11 +90,13 @@ fn main() { ).unwrap(); let balanced_score = { - let staked = assignment_ratio_to_staked_normalized(balanced.assignments.clone(), &stake_of).unwrap(); + let staked = assignment_ratio_to_staked_normalized( + balanced.assignments.clone(), + &stake_of, + ).unwrap(); let winners = to_without_backing(balanced.winners); - let support = build_support_map(winners.as_ref(), staked.as_ref()).unwrap(); + to_supports(winners.as_ref(), staked.as_ref()).unwrap().evaluate() - evaluate_support(&support) }; let enhance = is_score_better(balanced_score, unbalanced_score, Perbill::zero()); diff --git a/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs b/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs index 0aada6a5624dd..8ce7e7d415fa2 100644 --- a/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs +++ b/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,8 +22,8 @@ mod common; use common::*; use honggfuzz::fuzz; use sp_npos_elections::{ - assignment_ratio_to_staked_normalized, build_support_map, to_without_backing, VoteWeight, - evaluate_support, is_score_better, phragmms, + assignment_ratio_to_staked_normalized, is_score_better, phragmms, to_supports, + to_without_backing, EvaluateSupport, VoteWeight, }; use sp_runtime::Perbill; use rand::{self, SeedableRng}; @@ -66,11 +66,14 @@ fn main() { }; let unbalanced_score = { - let staked = assignment_ratio_to_staked_normalized(unbalanced.assignments.clone(), &stake_of).unwrap(); + let staked = assignment_ratio_to_staked_normalized( + unbalanced.assignments.clone(), + &stake_of, + ) + .unwrap(); let winners = to_without_backing(unbalanced.winners.clone()); - let support = build_support_map(winners.as_ref(), staked.as_ref()).unwrap(); + let score = to_supports(&winners, &staked).unwrap().evaluate(); - let score = evaluate_support(&support); if score[0] == 0 { // such cases cannot be improved by balancing. 
return; @@ -86,11 +89,13 @@ fn main() { ).unwrap(); let balanced_score = { - let staked = assignment_ratio_to_staked_normalized(balanced.assignments.clone(), &stake_of).unwrap(); + let staked = + assignment_ratio_to_staked_normalized(balanced.assignments.clone(), &stake_of) + .unwrap(); let winners = to_without_backing(balanced.winners); - let support = build_support_map(winners.as_ref(), staked.as_ref()).unwrap(); - - evaluate_support(&support) + to_supports(winners.as_ref(), staked.as_ref()) + .unwrap() + .evaluate() }; let enhance = is_score_better(balanced_score, unbalanced_score, Perbill::zero()); diff --git a/primitives/npos-elections/fuzzer/src/reduce.rs b/primitives/npos-elections/fuzzer/src/reduce.rs index 0f0d9893e048e..4ee2468d9d140 100644 --- a/primitives/npos-elections/fuzzer/src/reduce.rs +++ b/primitives/npos-elections/fuzzer/src/reduce.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,8 +34,8 @@ use honggfuzz::fuzz; mod common; use common::to_range; -use sp_npos_elections::{StakedAssignment, ExtendedBalance, build_support_map, reduce}; -use rand::{self, Rng, SeedableRng, RngCore}; +use sp_npos_elections::{reduce, to_support_map, ExtendedBalance, StakedAssignment}; +use rand::{self, Rng, RngCore, SeedableRng}; type Balance = u128; type AccountId = u64; @@ -109,9 +109,8 @@ fn assert_assignments_equal( ass1: &Vec>, ass2: &Vec>, ) { - - let support_1 = build_support_map::(winners, ass1).unwrap(); - let support_2 = build_support_map::(winners, ass2).unwrap(); + let support_1 = to_support_map::(winners, ass1).unwrap(); + let support_2 = to_support_map::(winners, ass2).unwrap(); for (who, support) in support_1.iter() { assert_eq!(support.total, support_2.get(who).unwrap().total); diff --git a/primitives/npos-elections/src/balancing.rs b/primitives/npos-elections/src/balancing.rs index 04083cc9b0d43..48cb980d78c33 100644 --- a/primitives/npos-elections/src/balancing.rs +++ b/primitives/npos-elections/src/balancing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -36,7 +36,7 @@ use sp_std::prelude::*; /// change has been made (`difference = 0`). /// /// In almost all cases, a balanced solution will have a better score than an unbalanced solution, -/// yet this is not 100% guaranteed because the first element of a [`ElectionScore`] does not +/// yet this is not 100% guaranteed because the first element of a [`crate::ElectionScore`] does not /// directly related to balancing. /// /// Note that some reference implementation adopt an approach in which voters are balanced randomly diff --git a/primitives/npos-elections/src/helpers.rs b/primitives/npos-elections/src/helpers.rs index bfde63676c6e8..10a49a084f102 100644 --- a/primitives/npos-elections/src/helpers.rs +++ b/primitives/npos-elections/src/helpers.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,23 +17,19 @@ //! Helper methods for npos-elections. 
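The fuzzer changes above all follow the same new pattern: score a solution by converting ratio assignments to staked ones, building the flat `Supports` with `to_supports`, and calling `evaluate()` on the result. A minimal sketch of that pattern, with made-up types (`u64` account ids, `Perbill` accuracy) and an arbitrary `to_elect` count:

```rust
use sp_npos_elections::{
	assignment_ratio_to_staked_normalized, seq_phragmen, to_supports, to_without_backing,
	ElectionResult, ElectionScore, Error, EvaluateSupport, VoteWeight,
};
use sp_runtime::Perbill;

// Run an election and compute its score with the new `to_supports(..)?.evaluate()` chain.
fn run_and_score(
	to_elect: usize,
	candidates: Vec<u64>,
	voters: Vec<(u64, VoteWeight, Vec<u64>)>,
	stake_of: impl Fn(&u64) -> VoteWeight,
) -> Result<ElectionScore, Error> {
	let ElectionResult { winners, assignments } =
		seq_phragmen::<u64, Perbill>(to_elect, candidates, voters, None)?;
	// ratio -> staked, normalized against each voter's total stake.
	let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?;
	// drop the approval stake from the winners, then build the flat support vector.
	let winners = to_without_backing(winners);
	Ok(to_supports(&winners, &staked)?.evaluate())
}
```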
-use crate::{ - Assignment, ExtendedBalance, VoteWeight, IdentifierT, StakedAssignment, WithApprovalOf, Error, -}; -use sp_arithmetic::{PerThing, InnerOf}; +use crate::{Assignment, Error, IdentifierT, PerThing128, StakedAssignment, VoteWeight, WithApprovalOf}; +use sp_arithmetic::PerThing; use sp_std::prelude::*; /// Converts a vector of ratio assignments into ones with absolute budget value. /// /// Note that this will NOT attempt at normalizing the result. -pub fn assignment_ratio_to_staked( +pub fn assignment_ratio_to_staked( ratios: Vec>, stake_of: FS, ) -> Vec> where for<'r> FS: Fn(&'r A) -> VoteWeight, - P: sp_std::ops::Mul, - ExtendedBalance: From>, { ratios .into_iter() @@ -45,19 +41,20 @@ where } /// Same as [`assignment_ratio_to_staked`] and try and do normalization. -pub fn assignment_ratio_to_staked_normalized( +pub fn assignment_ratio_to_staked_normalized( ratio: Vec>, stake_of: FS, ) -> Result>, Error> where for<'r> FS: Fn(&'r A) -> VoteWeight, - P: sp_std::ops::Mul, - ExtendedBalance: From>, { let mut staked = assignment_ratio_to_staked(ratio, &stake_of); - staked.iter_mut().map(|a| - a.try_normalize(stake_of(&a.who).into()).map_err(|err| Error::ArithmeticError(err)) - ).collect::>()?; + staked + .iter_mut() + .map(|a| { + a.try_normalize(stake_of(&a.who).into()).map_err(|err| Error::ArithmeticError(err)) + }) + .collect::>()?; Ok(staked) } @@ -66,24 +63,19 @@ where /// Note that this will NOT attempt at normalizing the result. pub fn assignment_staked_to_ratio( staked: Vec>, -) -> Vec> -where - ExtendedBalance: From>, -{ +) -> Vec> { staked.into_iter().map(|a| a.into_assignment()).collect() } /// Same as [`assignment_staked_to_ratio`] and try and do normalization. -pub fn assignment_staked_to_ratio_normalized( +pub fn assignment_staked_to_ratio_normalized( staked: Vec>, -) -> Result>, Error> -where - ExtendedBalance: From>, -{ +) -> Result>, Error> { let mut ratio = staked.into_iter().map(|a| a.into_assignment()).collect::>(); - ratio.iter_mut().map(|a| - a.try_normalize().map_err(|err| Error::ArithmeticError(err)) - ).collect::>()?; + ratio + .iter_mut() + .map(|a| a.try_normalize().map_err(|err| Error::ArithmeticError(err))) + .collect::>()?; Ok(ratio) } diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index 11951d2065989..d45698e1747bb 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at @@ -18,11 +18,11 @@ //! - [`seq_phragmen`]: Implements the Phragmén Sequential Method. An un-ranked, relatively fast //! election method that ensures PJR, but does not provide a constant factor approximation of the //! maximin problem. -//! - [`phragmms`]: Implements a hybrid approach inspired by Phragmén which is executed faster but +//! - [`phragmms()`]: Implements a hybrid approach inspired by Phragmén which is executed faster but //! it can achieve a constant factor approximation of the maximin problem, similar to that of the //! MMS algorithm. -//! - [`balance_solution`]: Implements the star balancing algorithm. This iterative process can push -//! 
a solution toward being more `balances`, which in turn can increase its score. +//! - [`balance`]: Implements the star balancing algorithm. This iterative process can push a +//! solution toward being more `balances`, which in turn can increase its score. //! //! ### Terminology //! @@ -57,12 +57,11 @@ //! //! // the combination of the two makes the election result. //! let election_result = ElectionResult { winners, assignments }; -//! //! ``` //! //! The `Assignment` field of the election result is voter-major, i.e. it is from the perspective of //! the voter. The struct that represents the opposite is called a `Support`. This struct is usually -//! accessed in a map-like manner, i.e. keyed vy voters, therefor it is stored as a mapping called +//! accessed in a map-like manner, i.e. keyed by voters, therefor it is stored as a mapping called //! `SupportMap`. //! //! Moreover, the support is built from absolute backing values, not ratios like the example above. @@ -70,22 +69,29 @@ //! `StakedAssignment`. //! //! -//! More information can be found at: https://arxiv.org/abs/2004.12990 +//! More information can be found at: #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{ - prelude::*, collections::btree_map::BTreeMap, fmt::Debug, cmp::Ordering, rc::Rc, cell::RefCell, -}; use sp_arithmetic::{ - PerThing, Rational128, ThresholdOrd, InnerOf, Normalizable, - traits::{Zero, Bounded}, + traits::{Bounded, UniqueSaturatedInto, Zero}, + Normalizable, PerThing, Rational128, ThresholdOrd, }; +use sp_std::{ + cell::RefCell, + cmp::Ordering, + collections::btree_map::BTreeMap, + convert::{TryFrom, TryInto}, + fmt::Debug, + ops::Mul, + prelude::*, + rc::Rc, +}; +use sp_core::RuntimeDebug; +use codec::{Decode, Encode}; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -#[cfg(feature = "std")] -use codec::{Encode, Decode}; +use serde::{Deserialize, Serialize}; #[cfg(test)] mod mock; @@ -125,22 +131,106 @@ impl __OrInvalidIndex for Option { } } -// re-export the compact solution type. -pub use sp_npos_elections_compact::generate_solution_type; - -/// A trait to limit the number of votes per voter. The generated compact type will implement this. -pub trait VotingLimit { +/// A common interface for all compact solutions. +/// +/// See [`sp-npos-elections-compact`] for more info. +pub trait CompactSolution: Sized { + /// The maximum number of votes that are allowed. const LIMIT: usize; + + /// The voter type. Needs to be an index (convert to usize). + type Voter: UniqueSaturatedInto + TryInto + TryFrom + Debug + Copy + Clone; + + /// The target type. Needs to be an index (convert to usize). + type Target: UniqueSaturatedInto + TryInto + TryFrom + Debug + Copy + Clone; + + /// The weight/accuracy type of each vote. + type Accuracy: PerThing128; + + /// Build self from a `assignments: Vec>`. + fn from_assignment( + assignments: Vec>, + voter_index: FV, + target_index: FT, + ) -> Result + where + A: IdentifierT, + for<'r> FV: Fn(&'r A) -> Option, + for<'r> FT: Fn(&'r A) -> Option; + + /// Convert self into a `Vec>` + fn into_assignment( + self, + voter_at: impl Fn(Self::Voter) -> Option, + target_at: impl Fn(Self::Target) -> Option, + ) -> Result>, Error>; + + /// Get the length of all the voters that this type is encoding. + /// + /// This is basically the same as the number of assignments, or number of active voters. + fn voter_count(&self) -> usize; + + /// Get the total count of edges. 
+ /// + /// This is effectively in the range of {[`Self::voter_count`], [`Self::voter_count`] * + /// [`Self::LIMIT`]}. + fn edge_count(&self) -> usize; + + /// Get the number of unique targets in the whole struct. + /// + /// Once presented with a list of winners, this set and the set of winners must be + /// equal. + fn unique_targets(&self) -> Vec; + + /// Get the average edge count. + fn average_edge_count(&self) -> usize { + self.edge_count() + .checked_div(self.voter_count()) + .unwrap_or(0) + } + + /// Remove a certain voter. + /// + /// This will only search until the first instance of `to_remove`, and return true. If + /// no instance is found (no-op), then it returns false. + /// + /// In other words, if this return true, exactly **one** element must have been removed from + /// `self.len()`. + fn remove_voter(&mut self, to_remove: Self::Voter) -> bool; + + /// Compute the score of this compact solution type. + fn score( + self, + winners: &[A], + stake_of: FS, + voter_at: impl Fn(Self::Voter) -> Option, + target_at: impl Fn(Self::Target) -> Option, + ) -> Result + where + for<'r> FS: Fn(&'r A) -> VoteWeight, + A: IdentifierT, + { + let ratio = self.into_assignment(voter_at, target_at)?; + let staked = helpers::assignment_ratio_to_staked_normalized(ratio, stake_of)?; + let supports = to_supports(winners, &staked)?; + Ok(supports.evaluate()) + } } +// re-export the compact solution type. +pub use sp_npos_elections_compact::generate_solution_type; + /// an aggregator trait for a generic type of a voter/target identifier. This usually maps to /// substrate's account id. pub trait IdentifierT: Clone + Eq + Default + Ord + Debug + codec::Codec {} - impl IdentifierT for T {} +/// Aggregator trait for a PerThing that can be multiplied by u128 (ExtendedBalance). +pub trait PerThing128: PerThing + Mul {} +impl> PerThing128 for T {} + /// The errors that might occur in the this crate and compact. -#[derive(Debug, Eq, PartialEq)] +#[derive(Eq, PartialEq, RuntimeDebug)] pub enum Error { /// While going from compact to staked, the stake of all the edges has gone above the total and /// the last stake cannot be assigned. @@ -151,6 +241,8 @@ pub enum Error { CompactInvalidIndex, /// An error occurred in some arithmetic operation. ArithmeticError(&'static str), + /// The data provided to create support map was invalid. + InvalidSupportEdge, } /// A type which is used in the API of this crate as a numeric weight of a vote, most often the @@ -160,7 +252,8 @@ pub type VoteWeight = u64; /// A type in which performing operations on vote weights are safe. pub type ExtendedBalance = u128; -/// The score of an assignment. This can be computed from the support map via [`evaluate_support`]. +/// The score of an assignment. This can be computed from the support map via +/// [`EvaluateSupport::evaluate`]. pub type ElectionScore = [ExtendedBalance; 3]; /// A winner, with their respective approval stake. @@ -170,7 +263,7 @@ pub type WithApprovalOf = (A, ExtendedBalance); pub type CandidatePtr = Rc>>; /// A candidate entity for the election. -#[derive(Debug, Clone, Default)] +#[derive(RuntimeDebug, Clone, Default)] pub struct Candidate { /// Identifier. who: AccountId, @@ -238,14 +331,14 @@ impl Voter { /// Note that this might create _un-normalized_ assignments, due to accuracy loss of `P`. Call /// site might compensate by calling `normalize()` on the returned `Assignment` as a /// post-precessing. 
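The `CompactSolution::score` helper shown above bundles exactly that conversion chain (`into_assignment`, then `assignment_ratio_to_staked_normalized`, then `to_supports` and `evaluate`). A sketch of calling it, assuming a solution type whose voter and target indices are `u32` and `u16` (the concrete index types are an assumption of the example):

```rust
use sp_npos_elections::{CompactSolution, ElectionScore, Error, IdentifierT, VoteWeight};

// Recover the score of an already-encoded solution against a winner set, using the
// snapshots (`voters`, `targets`) that its indices refer to.
fn score_of<S, A>(
	compact: S,
	winners: &[A],
	voters: &[A],
	targets: &[A],
	stake_of: impl Fn(&A) -> VoteWeight,
) -> Result<ElectionScore, Error>
where
	A: IdentifierT,
	S: CompactSolution<Voter = u32, Target = u16>,
{
	// index -> account id lookups, the inverse of what `from_assignment` needed.
	let voter_at = |i: u32| voters.get(i as usize).cloned();
	let target_at = |i: u16| targets.get(i as usize).cloned();
	compact.score(winners, stake_of, voter_at, target_at)
}
```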
- pub fn into_assignment(self) -> Option> - where - ExtendedBalance: From>, - { + pub fn into_assignment(self) -> Option> { let who = self.who; let budget = self.budget; - let distribution = self.edges.into_iter().filter_map(|e| { - let per_thing = P::from_rational_approximation(e.weight, budget); + let distribution = self + .edges + .into_iter() + .filter_map(|e| { + let per_thing = P::from_rational_approximation(e.weight, budget); // trim zero edges. if per_thing.is_zero() { None } else { Some((e.who, per_thing)) } }).collect::>(); @@ -283,7 +376,7 @@ impl Voter { }) } - /// Same as [`try_normalize`] but the normalization is only limited between elected edges. + /// Same as [`Self::try_normalize`] but the normalization is only limited between elected edges. pub fn try_normalize_elected(&mut self) -> Result<(), &'static str> { let elected_edge_weights = self .edges @@ -311,7 +404,7 @@ impl Voter { } /// Final result of the election. -#[derive(Debug)] +#[derive(RuntimeDebug)] pub struct ElectionResult { /// Just winners zipped with their approval stake. Note that the approval stake is merely the /// sub of their received stake and could be used for very basic sorting and approval voting. @@ -322,7 +415,7 @@ pub struct ElectionResult { } /// A voter's stake assignment among a set of targets, represented as ratios. -#[derive(Debug, Clone, Default)] +#[derive(RuntimeDebug, Clone, Default)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] pub struct Assignment { /// Voter's identifier. @@ -331,24 +424,20 @@ pub struct Assignment { pub distribution: Vec<(AccountId, P)>, } -impl Assignment -where - ExtendedBalance: From>, -{ +impl Assignment { /// Convert from a ratio assignment into one with absolute values aka. [`StakedAssignment`]. /// - /// It needs `stake` which is the total budget of the voter. If `fill` is set to true, it - /// _tries_ to ensure that all the potential rounding errors are compensated and the - /// distribution's sum is exactly equal to the total budget, by adding or subtracting the - /// remainder from the last distribution. + /// It needs `stake` which is the total budget of the voter. + /// + /// Note that this might create _un-normalized_ assignments, due to accuracy loss of `P`. Call + /// site might compensate by calling `try_normalize()` on the returned `StakedAssignment` as a + /// post-precessing. /// /// If an edge ratio is [`Bounded::min_value()`], it is dropped. This edge can never mean /// anything useful. - pub fn into_staked(self, stake: ExtendedBalance) -> StakedAssignment - where - P: sp_std::ops::Mul, - { - let distribution = self.distribution + pub fn into_staked(self, stake: ExtendedBalance) -> StakedAssignment { + let distribution = self + .distribution .into_iter() .filter_map(|(target, p)| { // if this ratio is zero, then skip it. @@ -396,7 +485,7 @@ where /// A voter's stake assignment among a set of targets, represented as absolute values in the scale /// of [`ExtendedBalance`]. -#[derive(Debug, Clone, Default)] +#[derive(RuntimeDebug, Clone, Default)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] pub struct StakedAssignment { /// Voter's identifier @@ -408,11 +497,8 @@ pub struct StakedAssignment { impl StakedAssignment { /// Converts self into the normal [`Assignment`] type. 
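A small sketch of the ratio/staked round trip that these comments describe, including the normalization step the call site is expected to perform afterwards. The `u64` account id, `Perbill` accuracy, and `budget` value are made up for the example:

```rust
use sp_npos_elections::{Assignment, StakedAssignment};
use sp_runtime::Perbill;

// Ratio -> staked -> ratio, compensating for rounding loss at each step.
// `budget` is the voter's total stake.
fn staked_round_trip(
	assignment: Assignment<u64, Perbill>,
	budget: u128,
) -> Result<Assignment<u64, Perbill>, &'static str> {
	// `into_staked` rounds every edge down, so the parts may not add up to `budget`...
	let mut staked: StakedAssignment<u64> = assignment.into_staked(budget);
	// ...which `try_normalize` compensates for.
	staked.try_normalize(budget)?;
	// the same caveat applies on the way back, hence the second normalization.
	let mut ratio: Assignment<u64, Perbill> = staked.into_assignment();
	ratio.try_normalize()?;
	Ok(ratio)
}
```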
/// - /// If `fill` is set to true, it _tries_ to ensure that all the potential rounding errors are - /// compensated and the distribution's sum is exactly equal to 100%, by adding or subtracting - /// the remainder from the last distribution. - /// - /// NOTE: it is quite critical that this attempt always works. The data type returned here will + /// NOTE: This will always round down, and thus the results might be less than a full 100% `P`. + /// Use a normalization post-processing to fix this. The data type returned here will /// potentially get used to create a compact type; a compact type requires sum of ratios to be /// less than 100% upon un-compacting. /// @@ -420,7 +506,6 @@ impl StakedAssignment { /// can never be re-created and does not mean anything useful anymore. pub fn into_assignment(self) -> Assignment where - ExtendedBalance: From>, AccountId: IdentifierT, { let stake = self.total(); @@ -479,8 +564,8 @@ impl StakedAssignment { /// /// This, at the current version, resembles the `Exposure` defined in the Staking pallet, yet they /// do not necessarily have to be the same. -#[derive(Default, Debug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Eq, PartialEq))] +#[derive(Default, RuntimeDebug, Encode, Decode, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct Support { /// Total support. pub total: ExtendedBalance, @@ -488,51 +573,43 @@ pub struct Support { pub voters: Vec<(AccountId, ExtendedBalance)>, } -/// A linkage from a candidate and its [`Support`]. -pub type SupportMap = BTreeMap>; - -/// Build the support map from the given election result. It maps a flat structure like -/// -/// ```nocompile -/// assignments: vec![ -/// voter1, vec![(candidate1, w11), (candidate2, w12)], -/// voter2, vec![(candidate1, w21), (candidate2, w22)] -/// ] -/// ``` +/// A target-major representation of the the election outcome. /// -/// into a mapping of candidates and their respective support: +/// Essentially a flat variant of [`SupportMap`]. /// -/// ```nocompile -/// SupportMap { -/// candidate1: Support { -/// own:0, -/// total: w11 + w21, -/// others: vec![(candidate1, w11), (candidate2, w21)] -/// }, -/// candidate2: Support { -/// own:0, -/// total: w12 + w22, -/// others: vec![(candidate1, w12), (candidate2, w22)] -/// }, -/// } -/// ``` +/// The main advantage of this is that it is encodable. +pub type Supports = Vec<(A, Support)>; + +/// Linkage from a winner to their [`Support`]. /// -/// The second returned flag indicates the number of edges who didn't corresponded to an actual -/// winner from the given winner set. A value in this place larger than 0 indicates a potentially -/// faulty assignment. +/// This is more helpful than a normal [`Supports`] as it allows faster error checking. +pub type SupportMap = BTreeMap>; + +/// Helper trait to convert from a support map to a flat support vector. +pub trait FlattenSupportMap { + /// Flatten the support. + fn flatten(self) -> Supports; +} + +impl FlattenSupportMap for SupportMap { + fn flatten(self) -> Supports { + self.into_iter().collect::>() + } +} + +/// Build the support map from the winners and assignments. /// -/// `O(E)` where `E` is the total number of edges. 
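To make the target-major view concrete, here is a tiny hand-built example of `to_support_map` and its flattened sibling `to_supports`; the account ids and stakes are made up:

```rust
use sp_npos_elections::{to_support_map, to_supports, Error, StakedAssignment};

// Two winners (10, 20) backed by two voters (1, 2), with made-up stakes.
fn support_example() -> Result<(), Error> {
	let winners: Vec<u64> = vec![10, 20];
	let staked = vec![
		StakedAssignment { who: 1u64, distribution: vec![(10u64, 60u128), (20, 40)] },
		StakedAssignment { who: 2, distribution: vec![(20, 100)] },
	];

	// target-major map, keyed by winner.
	let map = to_support_map::<u64>(&winners, &staked)?;
	assert_eq!(map.get(&20).map(|s| s.total), Some(140));

	// the same information, flattened into an encodable `Supports` vector.
	let flat = to_supports(&winners, &staked)?;
	assert_eq!(flat.len(), 2);
	Ok(())
}
```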
-pub fn build_support_map( - winners: &[AccountId], - assignments: &[StakedAssignment], -) -> Result, AccountId> where - AccountId: IdentifierT, -{ +/// The list of winners is basically a redundancy for error checking only; It ensures that all the +/// targets pointed to by the [`Assignment`] are present in the `winners`. +pub fn to_support_map( + winners: &[A], + assignments: &[StakedAssignment], +) -> Result, Error> { // Initialize the support of each candidate. - let mut supports = >::new(); - winners - .iter() - .for_each(|e| { supports.insert(e.clone(), Default::default()); }); + let mut supports = >::new(); + winners.iter().for_each(|e| { + supports.insert(e.clone(), Default::default()); + }); // build support struct. for StakedAssignment { who, distribution } in assignments.iter() { @@ -541,37 +618,83 @@ pub fn build_support_map( support.total = support.total.saturating_add(*weight_extended); support.voters.push((who.clone(), *weight_extended)); } else { - return Err(c.clone()) + return Err(Error::InvalidSupportEdge) } } } Ok(supports) } -/// Evaluate a support map. The returned tuple contains: +/// Same as [`to_support_map`] except it calls `FlattenSupportMap` on top of the result to return a +/// flat vector. /// -/// - Minimum support. This value must be **maximized**. -/// - Sum of all supports. This value must be **maximized**. -/// - Sum of all supports squared. This value must be **minimized**. +/// Similar to [`to_support_map`], `winners` is used for error checking. +pub fn to_supports( + winners: &[A], + assignments: &[StakedAssignment], +) -> Result, Error> { + to_support_map(winners, assignments).map(FlattenSupportMap::flatten) +} + +/// Extension trait for evaluating a support map or vector. +pub trait EvaluateSupport { + /// Evaluate a support map. The returned tuple contains: + /// + /// - Minimum support. This value must be **maximized**. + /// - Sum of all supports. This value must be **maximized**. + /// - Sum of all supports squared. This value must be **minimized**. + fn evaluate(self) -> ElectionScore; +} + +/// A common wrapper trait for both (&A, &B) and &(A, B). /// -/// `O(E)` where `E` is the total number of edges. -pub fn evaluate_support( - support: &SupportMap, -) -> ElectionScore { - let mut min_support = ExtendedBalance::max_value(); - let mut sum: ExtendedBalance = Zero::zero(); - // NOTE: The third element might saturate but fine for now since this will run on-chain and need - // to be fast. - let mut sum_squared: ExtendedBalance = Zero::zero(); - for (_, support) in support.iter() { - sum = sum.saturating_add(support.total); - let squared = support.total.saturating_mul(support.total); - sum_squared = sum_squared.saturating_add(squared); - if support.total < min_support { - min_support = support.total; +/// This allows us to implemented something for both `Vec<_>` and `BTreeMap<_>`, such as +/// [`EvaluateSupport`]. 
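And the scoring side of the new API: `evaluate()` produces `[minimum support, sum of supports, sum of supports squared]`, which `is_score_better` then compares against another score with an epsilon threshold. A sketch on hand-written supports (all values are made up):

```rust
use sp_npos_elections::{is_score_better, EvaluateSupport, Support};
use sp_runtime::Perbill;

fn score_example() {
	let supports = vec![
		(10u64, Support { total: 100u128, voters: vec![(1u64, 100u128)] }),
		(20, Support { total: 140, voters: vec![(1, 60), (2, 80)] }),
	];

	// [minimum support, sum of supports, sum of supports squared].
	let score = supports.evaluate();
	assert_eq!(score, [100, 240, 100 * 100 + 140 * 140]);

	// a strictly higher minimum support wins, regardless of the other two components.
	let other = [110, 240, 100 * 100 + 140 * 140];
	assert!(is_score_better(other, score, Perbill::zero()));
}
```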
+pub trait TupleRef { + fn extract(&self) -> (&K, &V); +} + +impl TupleRef for &(K, V) { + fn extract(&self) -> (&K, &V) { + (&self.0, &self.1) + } +} + +impl TupleRef for (K, V) { + fn extract(&self) -> (&K, &V) { + (&self.0, &self.1) + } +} + +impl TupleRef for (&K, &V) { + fn extract(&self) -> (&K, &V) { + (self.0, self.1) + } +} + +impl EvaluateSupport for C +where + C: IntoIterator, + I: TupleRef>, + A: IdentifierT, +{ + fn evaluate(self) -> ElectionScore { + let mut min_support = ExtendedBalance::max_value(); + let mut sum: ExtendedBalance = Zero::zero(); + // NOTE: The third element might saturate but fine for now since this will run on-chain and + // need to be fast. + let mut sum_squared: ExtendedBalance = Zero::zero(); + for item in self { + let (_, support) = item.extract(); + sum = sum.saturating_add(support.total); + let squared = support.total.saturating_mul(support.total); + sum_squared = sum_squared.saturating_add(squared); + if support.total < min_support { + min_support = support.total; + } } + [min_support, sum, sum_squared] } - [min_support, sum, sum_squared] } /// Compares two sets of election scores based on desirability and returns true if `this` is better @@ -581,15 +704,13 @@ pub fn evaluate_support( /// greater or less than `that`. /// /// Note that the third component should be minimized. -pub fn is_score_better(this: ElectionScore, that: ElectionScore, epsilon: P) -> bool - where ExtendedBalance: From> -{ +pub fn is_score_better(this: ElectionScore, that: ElectionScore, epsilon: P) -> bool { match this .iter() - .enumerate() - .map(|(i, e)| ( - e.ge(&that[i]), - e.tcmp(&that[i], epsilon.mul_ceil(that[i])), + .zip(that.iter()) + .map(|(thi, tha)| ( + thi.ge(&tha), + thi.tcmp(&tha, epsilon.mul_ceil(*tha)), )) .collect::>() .as_slice() @@ -629,7 +750,7 @@ pub(crate) fn setup_inputs( }) .collect::>>(); - let voters = initial_voters.into_iter().map(|(who, voter_stake, votes)| { + let voters = initial_voters.into_iter().filter_map(|(who, voter_stake, votes)| { let mut edges: Vec> = Vec::with_capacity(votes.len()); for v in votes { if edges.iter().any(|e| e.who == v) { @@ -650,12 +771,18 @@ pub(crate) fn setup_inputs( ); } // else {} would be wrong votes. We don't really care about it. } - Voter { - who, - edges: edges, - budget: voter_stake.into(), - load: Rational128::zero(), + if edges.is_empty() { + None } + else { + Some(Voter { + who, + edges: edges, + budget: voter_stake.into(), + load: Rational128::zero(), + }) + } + }).collect::>(); (candidates, voters,) diff --git a/primitives/npos-elections/src/mock.rs b/primitives/npos-elections/src/mock.rs index 75ff292450df6..ea8f3780e0e6a 100644 --- a/primitives/npos-elections/src/mock.rs +++ b/primitives/npos-elections/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,10 +19,13 @@ #![cfg(test)] -use crate::{seq_phragmen, ElectionResult, Assignment, VoteWeight, ExtendedBalance}; -use sp_arithmetic::{PerThing, InnerOf, traits::{SaturatedConversion, Zero, One}}; -use sp_std::collections::btree_map::BTreeMap; +use crate::*; +use sp_arithmetic::{ + traits::{One, SaturatedConversion, Zero}, + PerThing, +}; use sp_runtime::assert_eq_error_rate; +use sp_std::collections::btree_map::BTreeMap; #[derive(Default, Debug)] pub(crate) struct _Candidate { @@ -313,15 +316,12 @@ pub fn check_assignments_sum(assignments: Vec( +pub(crate) fn run_and_compare( candidates: Vec, voters: Vec<(AccountId, Vec)>, stake_of: &Box VoteWeight>, to_elect: usize, -) where - ExtendedBalance: From>, - Output: sp_std::ops::Mul, -{ +) { // run fixed point code. let ElectionResult { winners, assignments } = seq_phragmen::<_, Output>( to_elect, diff --git a/primitives/npos-elections/src/node.rs b/primitives/npos-elections/src/node.rs index d18c0e9016b64..ae65318ff0461 100644 --- a/primitives/npos-elections/src/node.rs +++ b/primitives/npos-elections/src/node.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/npos-elections/src/phragmen.rs b/primitives/npos-elections/src/phragmen.rs index cfbeed1cdd3fb..dad65666738c7 100644 --- a/primitives/npos-elections/src/phragmen.rs +++ b/primitives/npos-elections/src/phragmen.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,15 +21,15 @@ //! to the Maximin problem. use crate::{ - IdentifierT, VoteWeight, Voter, CandidatePtr, ExtendedBalance, setup_inputs, ElectionResult, + balancing, setup_inputs, CandidatePtr, ElectionResult, ExtendedBalance, IdentifierT, + PerThing128, VoteWeight, Voter, }; -use sp_std::prelude::*; use sp_arithmetic::{ - PerThing, InnerOf, Rational128, helpers_128bit::multiply_by_rational, - traits::{Zero, Bounded}, + traits::{Bounded, Zero}, + Rational128, }; -use crate::balancing; +use sp_std::prelude::*; /// The denominator used for loads. Since votes are collected as u64, the smallest ratio that we /// might collect is `1/approval_stake` where approval stake is the sum of votes. Hence, some number @@ -63,12 +63,12 @@ const DEN: ExtendedBalance = ExtendedBalance::max_value(); /// `expect` this to return `Ok`. /// /// This can only fail if the normalization fails. -pub fn seq_phragmen( +pub fn seq_phragmen( rounds: usize, initial_candidates: Vec, initial_voters: Vec<(AccountId, VoteWeight, Vec)>, balance: Option<(usize, ExtendedBalance)>, -) -> Result, &'static str> where ExtendedBalance: From> { +) -> Result, crate::Error> { let (candidates, voters) = setup_inputs(initial_candidates, initial_voters); let (candidates, mut voters) = seq_phragmen_core::( @@ -93,11 +93,16 @@ pub fn seq_phragmen( // sort winners based on desirability. 
winners.sort_by_key(|c_ptr| c_ptr.borrow().round); - let mut assignments = voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); - let _ = assignments.iter_mut().map(|a| a.try_normalize()).collect::>()?; - let winners = winners.into_iter().map(|w_ptr| - (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake) - ).collect(); + let mut assignments = + voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); + let _ = assignments + .iter_mut() + .map(|a| a.try_normalize().map_err(|e| crate::Error::ArithmeticError(e))) + .collect::>()?; + let winners = winners + .into_iter() + .map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake)) + .collect(); Ok(ElectionResult { winners, assignments }) } @@ -108,14 +113,13 @@ pub fn seq_phragmen( /// `seq_phragmen` for more information. This function is left public in case a crate needs to use /// the implementation in a custom way. /// -/// To create th inputs needed for this function, see [`crate::setup_inputs`]. -/// /// This can only fail if the normalization fails. +// To create the inputs needed for this function, see [`crate::setup_inputs`]. pub fn seq_phragmen_core( rounds: usize, candidates: Vec>, mut voters: Vec>, -) -> Result<(Vec>, Vec>), &'static str> { +) -> Result<(Vec>, Vec>), crate::Error> { // we have already checked that we have more candidates than minimum_candidate_count. let to_elect = rounds.min(candidates.len()); @@ -199,7 +203,7 @@ pub fn seq_phragmen_core( // edge of all candidates that eventually have a non-zero weight must be elected. debug_assert!(voter.edges.iter().all(|e| e.candidate.borrow().elected)); // inc budget to sum the budget. - voter.try_normalize_elected()?; + voter.try_normalize_elected().map_err(|e| crate::Error::ArithmeticError(e))?; } Ok((candidates, voters)) diff --git a/primitives/npos-elections/src/phragmms.rs b/primitives/npos-elections/src/phragmms.rs index 9b59e22c249b6..ad93d2f18ef9a 100644 --- a/primitives/npos-elections/src/phragmms.rs +++ b/primitives/npos-elections/src/phragmms.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,9 +23,9 @@ use crate::{ IdentifierT, ElectionResult, ExtendedBalance, setup_inputs, VoteWeight, Voter, CandidatePtr, - balance, + balance, PerThing128, }; -use sp_arithmetic::{PerThing, InnerOf, Rational128, traits::Bounded}; +use sp_arithmetic::{PerThing, Rational128, traits::Bounded}; use sp_std::{prelude::*, rc::Rc}; /// Execute the phragmms method. @@ -41,14 +41,12 @@ use sp_std::{prelude::*, rc::Rc}; /// assignments, `assignment.distribution.map(|p| p.deconstruct()).sum()` fails to fit inside /// `UpperOf